/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, NON INFRINGEMENT.
 *    See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

#define HPSA_DRIVER_VERSION "3.4.20-170"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* error-handling / request timeout used for external (pass-through RAID) devices */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_type = Subsystem Device ID & Vendor ID
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
		      void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);

static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
	struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static void decode_sense_data(const u8 *sense_data, int sense_data_len,
	u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);

		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
433
434static u32 lockup_detected(struct ctlr_info *h);
435static ssize_t host_show_lockup_detected(struct device *dev,
436 struct device_attribute *attr, char *buf)
437{
438 int ld;
439 struct ctlr_info *h;
440 struct Scsi_Host *shost = class_to_shost(dev);
441
442 h = shost_to_hba(shost);
443 ld = lockup_detected(h);
444
445 return sprintf(buf, "ld=%d\n", ld);
446}
447
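/*
 * sysfs 'hp_ssd_smart_path_status' store handler: lets a privileged user
 * (CAP_SYS_ADMIN and CAP_SYS_RAWIO) enable or disable HP SSD Smart Path
 * (ioaccel) for the whole controller by writing 0 or 1.
 */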
448static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
449 struct device_attribute *attr,
450 const char *buf, size_t count)
451{
452 int status, len;
453 struct ctlr_info *h;
454 struct Scsi_Host *shost = class_to_shost(dev);
455 char tmpbuf[10];
456
457 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
458 return -EACCES;
459 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
460 strncpy(tmpbuf, buf, len);
461 tmpbuf[len] = '\0';
462 if (sscanf(tmpbuf, "%d", &status) != 1)
463 return -EINVAL;
464 h = shost_to_hba(shost);
465 h->acciopath_status = !!status;
466 dev_warn(&h->pdev->dev,
467 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
468 h->acciopath_status ? "enabled" : "disabled");
469 return count;
470}
471
472static ssize_t host_store_raid_offload_debug(struct device *dev,
473 struct device_attribute *attr,
474 const char *buf, size_t count)
475{
476 int debug_level, len;
477 struct ctlr_info *h;
478 struct Scsi_Host *shost = class_to_shost(dev);
479 char tmpbuf[10];
480
481 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
482 return -EACCES;
483 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
484 strncpy(tmpbuf, buf, len);
485 tmpbuf[len] = '\0';
486 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
487 return -EINVAL;
488 if (debug_level < 0)
489 debug_level = 0;
490 h = shost_to_hba(shost);
491 h->raid_offload_debug = debug_level;
492 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
493 h->raid_offload_debug);
494 return count;
495}
496
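/* sysfs 'rescan' store handler: writing anything kicks off a new device scan. */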
497static ssize_t host_store_rescan(struct device *dev,
498 struct device_attribute *attr,
499 const char *buf, size_t count)
500{
501 struct ctlr_info *h;
502 struct Scsi_Host *shost = class_to_shost(dev);
503 h = shost_to_hba(shost);
504 hpsa_scan_start(h->scsi_host);
505 return count;
506}
507
508static ssize_t host_show_firmware_revision(struct device *dev,
509 struct device_attribute *attr, char *buf)
510{
511 struct ctlr_info *h;
512 struct Scsi_Host *shost = class_to_shost(dev);
513 unsigned char *fwrev;
514
515 h = shost_to_hba(shost);
516 if (!h->hba_inquiry_data)
517 return 0;
518 fwrev = &h->hba_inquiry_data[32];
519 return snprintf(buf, 20, "%c%c%c%c\n",
520 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
521}
522
523static ssize_t host_show_commands_outstanding(struct device *dev,
524 struct device_attribute *attr, char *buf)
525{
526 struct Scsi_Host *shost = class_to_shost(dev);
527 struct ctlr_info *h = shost_to_hba(shost);
528
529 return snprintf(buf, 20, "%d\n",
530 atomic_read(&h->commands_outstanding));
531}
532
533static ssize_t host_show_transport_mode(struct device *dev,
534 struct device_attribute *attr, char *buf)
535{
536 struct ctlr_info *h;
537 struct Scsi_Host *shost = class_to_shost(dev);
538
539 h = shost_to_hba(shost);
540 return snprintf(buf, 20, "%s\n",
541 h->transMethod & CFGTBL_Trans_Performant ?
542 "performant" : "simple");
543}
544
545static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
546 struct device_attribute *attr, char *buf)
547{
548 struct ctlr_info *h;
549 struct Scsi_Host *shost = class_to_shost(dev);
550
551 h = shost_to_hba(shost);
552 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
553 (h->acciopath_status == 1) ? "enabled" : "disabled");
554}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
557static u32 unresettable_controller[] = {
558 0x324a103C,
559 0x324b103C,
560 0x3223103C,
561 0x3234103C,
562 0x3235103C,
563 0x3211103C,
564 0x3212103C,
565 0x3213103C,
566 0x3214103C,
567 0x3215103C,
568 0x3237103C,
569 0x323D103C,
570 0x40800E11,
571 0x409C0E11,
572 0x409D0E11,
573 0x40700E11,
574 0x40820E11,
575 0x40830E11,
576 0x409A0E11,
577 0x409B0E11,
578 0x40910E11,
579};

/* List of controllers which cannot even be soft reset */
582static u32 soft_unresettable_controller[] = {
583 0x40800E11,
584 0x40700E11,
585 0x40820E11,
586 0x40830E11,
587 0x409A0E11,
588 0x409B0E11,
589 0x40910E11,
	/*
	 * Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 */
597 0x409C0E11,
598 0x409D0E11,
599};
600
601static int board_id_in_array(u32 a[], int nelems, u32 board_id)
602{
603 int i;
604
605 for (i = 0; i < nelems; i++)
606 if (a[i] == board_id)
607 return 1;
608 return 0;
609}
610
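/*
 * A controller is considered hard/soft resettable unless its board id
 * appears in the corresponding exclusion table above.
 */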
611static int ctlr_is_hard_resettable(u32 board_id)
612{
613 return !board_id_in_array(unresettable_controller,
614 ARRAY_SIZE(unresettable_controller), board_id);
615}
616
617static int ctlr_is_soft_resettable(u32 board_id)
618{
619 return !board_id_in_array(soft_unresettable_controller,
620 ARRAY_SIZE(soft_unresettable_controller), board_id);
621}
622
623static int ctlr_is_resettable(u32 board_id)
624{
625 return ctlr_is_hard_resettable(board_id) ||
626 ctlr_is_soft_resettable(board_id);
627}
628
629static ssize_t host_show_resettable(struct device *dev,
630 struct device_attribute *attr, char *buf)
631{
632 struct ctlr_info *h;
633 struct Scsi_Host *shost = class_to_shost(dev);
634
635 h = shost_to_hba(shost);
636 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
637}
638
639static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
640{
641 return (scsi3addr[3] & 0xC0) == 0x40;
642}
643
644static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
645 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
646};
647#define HPSA_RAID_0 0
648#define HPSA_RAID_4 1
649#define HPSA_RAID_1 2
650#define HPSA_RAID_5 3
651#define HPSA_RAID_51 4
652#define HPSA_RAID_6 5
653#define HPSA_RAID_ADM 6
654#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
655#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
656
657static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
658{
659 return !device->physical_device;
660}
661
662static ssize_t raid_level_show(struct device *dev,
663 struct device_attribute *attr, char *buf)
664{
665 ssize_t l = 0;
666 unsigned char rlevel;
667 struct ctlr_info *h;
668 struct scsi_device *sdev;
669 struct hpsa_scsi_dev_t *hdev;
670 unsigned long flags;
671
672 sdev = to_scsi_device(dev);
673 h = sdev_to_hba(sdev);
674 spin_lock_irqsave(&h->lock, flags);
675 hdev = sdev->hostdata;
676 if (!hdev) {
677 spin_unlock_irqrestore(&h->lock, flags);
678 return -ENODEV;
679 }
680
681
682 if (!is_logical_device(hdev)) {
683 spin_unlock_irqrestore(&h->lock, flags);
684 l = snprintf(buf, PAGE_SIZE, "N/A\n");
685 return l;
686 }
687
688 rlevel = hdev->raid_level;
689 spin_unlock_irqrestore(&h->lock, flags);
690 if (rlevel > RAID_UNKNOWN)
691 rlevel = RAID_UNKNOWN;
692 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
693 return l;
694}
695
696static ssize_t lunid_show(struct device *dev,
697 struct device_attribute *attr, char *buf)
698{
699 struct ctlr_info *h;
700 struct scsi_device *sdev;
701 struct hpsa_scsi_dev_t *hdev;
702 unsigned long flags;
703 unsigned char lunid[8];
704
705 sdev = to_scsi_device(dev);
706 h = sdev_to_hba(sdev);
707 spin_lock_irqsave(&h->lock, flags);
708 hdev = sdev->hostdata;
709 if (!hdev) {
710 spin_unlock_irqrestore(&h->lock, flags);
711 return -ENODEV;
712 }
713 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
714 spin_unlock_irqrestore(&h->lock, flags);
715 return snprintf(buf, 20, "0x%8phN\n", lunid);
716}
717
718static ssize_t unique_id_show(struct device *dev,
719 struct device_attribute *attr, char *buf)
720{
721 struct ctlr_info *h;
722 struct scsi_device *sdev;
723 struct hpsa_scsi_dev_t *hdev;
724 unsigned long flags;
725 unsigned char sn[16];
726
727 sdev = to_scsi_device(dev);
728 h = sdev_to_hba(sdev);
729 spin_lock_irqsave(&h->lock, flags);
730 hdev = sdev->hostdata;
731 if (!hdev) {
732 spin_unlock_irqrestore(&h->lock, flags);
733 return -ENODEV;
734 }
735 memcpy(sn, hdev->device_id, sizeof(sn));
736 spin_unlock_irqrestore(&h->lock, flags);
737 return snprintf(buf, 16 * 2 + 2,
738 "%02X%02X%02X%02X%02X%02X%02X%02X"
739 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
740 sn[0], sn[1], sn[2], sn[3],
741 sn[4], sn[5], sn[6], sn[7],
742 sn[8], sn[9], sn[10], sn[11],
743 sn[12], sn[13], sn[14], sn[15]);
744}
745
746static ssize_t sas_address_show(struct device *dev,
747 struct device_attribute *attr, char *buf)
748{
749 struct ctlr_info *h;
750 struct scsi_device *sdev;
751 struct hpsa_scsi_dev_t *hdev;
752 unsigned long flags;
753 u64 sas_address;
754
755 sdev = to_scsi_device(dev);
756 h = sdev_to_hba(sdev);
757 spin_lock_irqsave(&h->lock, flags);
758 hdev = sdev->hostdata;
759 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
760 spin_unlock_irqrestore(&h->lock, flags);
761 return -ENODEV;
762 }
763 sas_address = hdev->sas_address;
764 spin_unlock_irqrestore(&h->lock, flags);
765
766 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
767}
768
769static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
770 struct device_attribute *attr, char *buf)
771{
772 struct ctlr_info *h;
773 struct scsi_device *sdev;
774 struct hpsa_scsi_dev_t *hdev;
775 unsigned long flags;
776 int offload_enabled;
777
778 sdev = to_scsi_device(dev);
779 h = sdev_to_hba(sdev);
780 spin_lock_irqsave(&h->lock, flags);
781 hdev = sdev->hostdata;
782 if (!hdev) {
783 spin_unlock_irqrestore(&h->lock, flags);
784 return -ENODEV;
785 }
786 offload_enabled = hdev->offload_enabled;
787 spin_unlock_irqrestore(&h->lock, flags);
788
789 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
790 return snprintf(buf, 20, "%d\n", offload_enabled);
791 else
792 return snprintf(buf, 40, "%s\n",
793 "Not applicable for a controller");
794}
795
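/*
 * sysfs 'path_info' show handler: prints one line per possible path
 * (up to MAX_PATHS) with H:B:T:L, device type, connector/box/bay info
 * and whether that path is active or inactive.
 */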
796#define MAX_PATHS 8
797static ssize_t path_info_show(struct device *dev,
798 struct device_attribute *attr, char *buf)
799{
800 struct ctlr_info *h;
801 struct scsi_device *sdev;
802 struct hpsa_scsi_dev_t *hdev;
803 unsigned long flags;
804 int i;
805 int output_len = 0;
806 u8 box;
807 u8 bay;
808 u8 path_map_index = 0;
809 char *active;
810 unsigned char phys_connector[2];
811
812 sdev = to_scsi_device(dev);
813 h = sdev_to_hba(sdev);
814 spin_lock_irqsave(&h->devlock, flags);
815 hdev = sdev->hostdata;
816 if (!hdev) {
817 spin_unlock_irqrestore(&h->devlock, flags);
818 return -ENODEV;
819 }
820
821 bay = hdev->bay;
822 for (i = 0; i < MAX_PATHS; i++) {
823 path_map_index = 1<<i;
824 if (i == hdev->active_path_index)
825 active = "Active";
826 else if (hdev->path_map & path_map_index)
827 active = "Inactive";
828 else
829 continue;
830
831 output_len += scnprintf(buf + output_len,
832 PAGE_SIZE - output_len,
833 "[%d:%d:%d:%d] %20.20s ",
834 h->scsi_host->host_no,
835 hdev->bus, hdev->target, hdev->lun,
836 scsi_device_type(hdev->devtype));
837
838 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
839 output_len += scnprintf(buf + output_len,
840 PAGE_SIZE - output_len,
841 "%s\n", active);
842 continue;
843 }
844
845 box = hdev->box[i];
846 memcpy(&phys_connector, &hdev->phys_connector[i],
847 sizeof(phys_connector));
848 if (phys_connector[0] < '0')
849 phys_connector[0] = '0';
850 if (phys_connector[1] < '0')
851 phys_connector[1] = '0';
852 output_len += scnprintf(buf + output_len,
853 PAGE_SIZE - output_len,
854 "PORT: %.2s ",
855 phys_connector);
856 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
857 hdev->expose_device) {
858 if (box == 0 || box == 0xFF) {
859 output_len += scnprintf(buf + output_len,
860 PAGE_SIZE - output_len,
861 "BAY: %hhu %s\n",
862 bay, active);
863 } else {
864 output_len += scnprintf(buf + output_len,
865 PAGE_SIZE - output_len,
866 "BOX: %hhu BAY: %hhu %s\n",
867 box, bay, active);
868 }
869 } else if (box != 0 && box != 0xFF) {
870 output_len += scnprintf(buf + output_len,
871 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
872 box, active);
873 } else
874 output_len += scnprintf(buf + output_len,
875 PAGE_SIZE - output_len, "%s\n", active);
876 }
877
878 spin_unlock_irqrestore(&h->devlock, flags);
879 return output_len;
880}
881
882static ssize_t host_show_ctlr_num(struct device *dev,
883 struct device_attribute *attr, char *buf)
884{
885 struct ctlr_info *h;
886 struct Scsi_Host *shost = class_to_shost(dev);
887
888 h = shost_to_hba(shost);
889 return snprintf(buf, 20, "%d\n", h->ctlr);
890}
891
892static ssize_t host_show_legacy_board(struct device *dev,
893 struct device_attribute *attr, char *buf)
894{
895 struct ctlr_info *h;
896 struct Scsi_Host *shost = class_to_shost(dev);
897
898 h = shost_to_hba(shost);
899 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
900}
901
902static DEVICE_ATTR_RO(raid_level);
903static DEVICE_ATTR_RO(lunid);
904static DEVICE_ATTR_RO(unique_id);
905static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
906static DEVICE_ATTR_RO(sas_address);
907static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
908 host_show_hp_ssd_smart_path_enabled, NULL);
909static DEVICE_ATTR_RO(path_info);
910static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
911 host_show_hp_ssd_smart_path_status,
912 host_store_hp_ssd_smart_path_status);
913static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
914 host_store_raid_offload_debug);
915static DEVICE_ATTR(firmware_revision, S_IRUGO,
916 host_show_firmware_revision, NULL);
917static DEVICE_ATTR(commands_outstanding, S_IRUGO,
918 host_show_commands_outstanding, NULL);
919static DEVICE_ATTR(transport_mode, S_IRUGO,
920 host_show_transport_mode, NULL);
921static DEVICE_ATTR(resettable, S_IRUGO,
922 host_show_resettable, NULL);
923static DEVICE_ATTR(lockup_detected, S_IRUGO,
924 host_show_lockup_detected, NULL);
925static DEVICE_ATTR(ctlr_num, S_IRUGO,
926 host_show_ctlr_num, NULL);
927static DEVICE_ATTR(legacy_board, S_IRUGO,
928 host_show_legacy_board, NULL);
929
930static struct device_attribute *hpsa_sdev_attrs[] = {
931 &dev_attr_raid_level,
932 &dev_attr_lunid,
933 &dev_attr_unique_id,
934 &dev_attr_hp_ssd_smart_path_enabled,
935 &dev_attr_path_info,
936 &dev_attr_sas_address,
937 NULL,
938};
939
940static struct device_attribute *hpsa_shost_attrs[] = {
941 &dev_attr_rescan,
942 &dev_attr_firmware_revision,
943 &dev_attr_commands_outstanding,
944 &dev_attr_transport_mode,
945 &dev_attr_resettable,
946 &dev_attr_hp_ssd_smart_path_status,
947 &dev_attr_raid_offload_debug,
948 &dev_attr_lockup_detected,
949 &dev_attr_ctlr_num,
950 &dev_attr_legacy_board,
951 NULL,
952};
953
954#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
955 HPSA_MAX_CONCURRENT_PASSTHRUS)
956
957static struct scsi_host_template hpsa_driver_template = {
958 .module = THIS_MODULE,
959 .name = HPSA,
960 .proc_name = HPSA,
961 .queuecommand = hpsa_scsi_queue_command,
962 .scan_start = hpsa_scan_start,
963 .scan_finished = hpsa_scan_finished,
964 .change_queue_depth = hpsa_change_queue_depth,
965 .this_id = -1,
966 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
967 .ioctl = hpsa_ioctl,
968 .slave_alloc = hpsa_slave_alloc,
969 .slave_configure = hpsa_slave_configure,
970 .slave_destroy = hpsa_slave_destroy,
971#ifdef CONFIG_COMPAT
972 .compat_ioctl = hpsa_compat_ioctl,
973#endif
974 .sdev_attrs = hpsa_sdev_attrs,
975 .shost_attrs = hpsa_shost_attrs,
976 .max_sectors = 2048,
977 .no_write_same = 1,
978};
979
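/*
 * Pull the next completed command tag off the reply queue.  For the
 * simple and ioaccel1 transports this defers to the board-specific
 * command_completed method; in performant mode it consumes the next
 * entry from the software reply queue, handling wraparound via the
 * toggle bit in each entry.
 */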
980static inline u32 next_command(struct ctlr_info *h, u8 q)
981{
982 u32 a;
983 struct reply_queue_buffer *rq = &h->reply_queue[q];
984
985 if (h->transMethod & CFGTBL_Trans_io_accel1)
986 return h->access.command_completed(h, q);
987
988 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
989 return h->access.command_completed(h, q);
990
991 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
992 a = rq->head[rq->current_entry];
993 rq->current_entry++;
994 atomic_dec(&h->commands_outstanding);
995 } else {
996 a = FIFO_EMPTY;
997 }
998
999 if (rq->current_entry == h->max_commands) {
1000 rq->current_entry = 0;
1001 rq->wraparound ^= 1;
1002 }
1003 return a;
1004}
1037#define DEFAULT_REPLY_QUEUE (-1)
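
/*
 * Performant-mode submission: bit 0 of c->busaddr tells the controller the
 * command uses performant mode, and the next bits encode the block fetch
 * table entry for the command's scatter-gather count.  When MSI-X is in
 * use, Header.ReplyQueue selects which reply queue (and therefore which
 * vector/CPU) the completion is posted to.
 */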
1038static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1039 int reply_queue)
1040{
1041 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1042 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1043 if (unlikely(!h->msix_vectors))
1044 return;
1045 c->Header.ReplyQueue = reply_queue;
1046 }
1047}
1048
1049static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1050 struct CommandList *c,
1051 int reply_queue)
1052{
1053 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1054
1055
1056
1057
1058
1059 cp->ReplyQueue = reply_queue;
1060
1061
1062
1063
1064
1065
1066 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1067 IOACCEL1_BUSADDR_CMDTYPE;
1068}
1069
1070static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1071 struct CommandList *c,
1072 int reply_queue)
1073{
1074 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1075 &h->ioaccel2_cmd_pool[c->cmdindex];
1076
1077
1078
1079
1080 cp->reply_queue = reply_queue;
1081
1082
1083
1084
1085
1086 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1087}
1088
1089static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1090 struct CommandList *c,
1091 int reply_queue)
1092{
1093 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1094
1095
1096
1097
1098
1099 cp->reply_queue = reply_queue;
1100
1101
1102
1103
1104
1105
1106 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1107}
1108
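/* A firmware flash is a BMIC write with the flash-firmware subcommand in CDB byte 6. */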
1109static int is_firmware_flash_cmd(u8 *cdb)
1110{
1111 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1112}

/*
 * During firmware flash, the heartbeat register may not update as
 * frequently as it should, so we dial down lockup detection while a
 * flash operation is in progress and dial it back up when it completes.
 */
1119#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1120#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1121#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1122static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1123 struct CommandList *c)
1124{
1125 if (!is_firmware_flash_cmd(c->Request.CDB))
1126 return;
1127 atomic_inc(&h->firmware_flash_in_progress);
1128 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1129}
1130
1131static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1132 struct CommandList *c)
1133{
1134 if (is_firmware_flash_cmd(c->Request.CDB) &&
1135 atomic_dec_and_test(&h->firmware_flash_in_progress))
1136 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1137}
1138
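/*
 * Common submission path: account for the outstanding command, pick the
 * reply queue for the submitting CPU, apply the transport-specific
 * performant/ioaccel encoding, and ring the appropriate doorbell register.
 */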
1139static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1140 struct CommandList *c, int reply_queue)
1141{
1142 dial_down_lockup_detection_during_fw_flash(h, c);
1143 atomic_inc(&h->commands_outstanding);
1144 if (c->device)
1145 atomic_inc(&c->device->commands_outstanding);
1146
1147 reply_queue = h->reply_map[raw_smp_processor_id()];
1148 switch (c->cmd_type) {
1149 case CMD_IOACCEL1:
1150 set_ioaccel1_performant_mode(h, c, reply_queue);
1151 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1152 break;
1153 case CMD_IOACCEL2:
1154 set_ioaccel2_performant_mode(h, c, reply_queue);
1155 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1156 break;
1157 case IOACCEL2_TMF:
1158 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1159 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1160 break;
1161 default:
1162 set_performant_mode(h, c, reply_queue);
1163 h->access.submit_command(h, c);
1164 }
1165}
1166
1167static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1168{
1169 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1170}
1171
1172static inline int is_hba_lunid(unsigned char scsi3addr[])
1173{
1174 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1175}
1176
1177static inline int is_scsi_rev_5(struct ctlr_info *h)
1178{
1179 if (!h->hba_inquiry_data)
1180 return 0;
1181 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1182 return 1;
1183 return 0;
1184}
1185
1186static int hpsa_find_target_lun(struct ctlr_info *h,
1187 unsigned char scsi3addr[], int bus, int *target, int *lun)
1188{
	/* Finds an unused bus, target, lun for a new physical device.
	 * Assumes h->devlock is held.
	 */
1192 int i, found = 0;
1193 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1194
1195 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1196
1197 for (i = 0; i < h->ndevices; i++) {
1198 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1199 __set_bit(h->dev[i]->target, lun_taken);
1200 }
1201
1202 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1203 if (i < HPSA_MAX_DEVICES) {
1204
1205 *target = i;
1206 *lun = 0;
1207 found = 1;
1208 }
1209 return !found;
1210}
1211
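/*
 * Log a one-line description of a device (type, vendor, model, RAID level
 * or drive type, ioaccel capability) at the given printk level.
 */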
1212static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1213 struct hpsa_scsi_dev_t *dev, char *description)
1214{
1215#define LABEL_SIZE 25
1216 char label[LABEL_SIZE];
1217
1218 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1219 return;
1220
1221 switch (dev->devtype) {
1222 case TYPE_RAID:
1223 snprintf(label, LABEL_SIZE, "controller");
1224 break;
1225 case TYPE_ENCLOSURE:
1226 snprintf(label, LABEL_SIZE, "enclosure");
1227 break;
1228 case TYPE_DISK:
1229 case TYPE_ZBC:
1230 if (dev->external)
1231 snprintf(label, LABEL_SIZE, "external");
1232 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1233 snprintf(label, LABEL_SIZE, "%s",
1234 raid_label[PHYSICAL_DRIVE]);
1235 else
1236 snprintf(label, LABEL_SIZE, "RAID-%s",
1237 dev->raid_level > RAID_UNKNOWN ? "?" :
1238 raid_label[dev->raid_level]);
1239 break;
1240 case TYPE_ROM:
1241 snprintf(label, LABEL_SIZE, "rom");
1242 break;
1243 case TYPE_TAPE:
1244 snprintf(label, LABEL_SIZE, "tape");
1245 break;
1246 case TYPE_MEDIUM_CHANGER:
1247 snprintf(label, LABEL_SIZE, "changer");
1248 break;
1249 default:
1250 snprintf(label, LABEL_SIZE, "UNKNOWN");
1251 break;
1252 }
1253
1254 dev_printk(level, &h->pdev->dev,
1255 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1256 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1257 description,
1258 scsi_device_type(dev->devtype),
1259 dev->vendor,
1260 dev->model,
1261 label,
1262 dev->offload_config ? '+' : '-',
1263 dev->offload_to_be_enabled ? '+' : '-',
1264 dev->expose_device);
1265}
1266
1267
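/*
 * Add a device to h->dev[].  If the device has no LUN assigned yet, pick
 * an unused target for it, or, for a non-zero LUN of a multi-LUN physical
 * device, inherit bus/target from its LUN 0 sibling.
 */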
1268static int hpsa_scsi_add_entry(struct ctlr_info *h,
1269 struct hpsa_scsi_dev_t *device,
1270 struct hpsa_scsi_dev_t *added[], int *nadded)
1271{
	/* assumes h->devlock is held */
1273 int n = h->ndevices;
1274 int i;
1275 unsigned char addr1[8], addr2[8];
1276 struct hpsa_scsi_dev_t *sd;
1277
1278 if (n >= HPSA_MAX_DEVICES) {
1279 dev_err(&h->pdev->dev, "too many devices, some will be "
1280 "inaccessible.\n");
1281 return -1;
1282 }
1283
1284
1285 if (device->lun != -1)
1286
1287 goto lun_assigned;

	/*
	 * If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the device address indicates this.
	 */
1293 if (device->scsi3addr[4] == 0) {
1294
1295 if (hpsa_find_target_lun(h, device->scsi3addr,
1296 device->bus, &device->target, &device->lun) != 0)
1297 return -1;
1298 goto lun_assigned;
1299 }

	/*
	 * This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Then assign the same bus and target for this new LUN,
	 * and use the logical unit number from the firmware.
	 */
1307 memcpy(addr1, device->scsi3addr, 8);
1308 addr1[4] = 0;
1309 addr1[5] = 0;
1310 for (i = 0; i < n; i++) {
1311 sd = h->dev[i];
1312 memcpy(addr2, sd->scsi3addr, 8);
1313 addr2[4] = 0;
1314 addr2[5] = 0;
1315
1316 if (memcmp(addr1, addr2, 8) == 0) {
1317 device->bus = sd->bus;
1318 device->target = sd->target;
1319 device->lun = device->scsi3addr[4];
1320 break;
1321 }
1322 }
1323 if (device->lun == -1) {
1324 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1325 " suspect firmware bug or unsupported hardware "
1326 "configuration.\n");
1327 return -1;
1328 }
1329
1330lun_assigned:
1331
1332 h->dev[n] = device;
1333 h->ndevices++;
1334 added[*nadded] = device;
1335 (*nadded)++;
1336 hpsa_show_dev_msg(KERN_INFO, h, device,
1337 device->expose_device ? "added" : "masked");
1338 return 0;
1339}
1340
1341
1342
1343
1344
1345
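/*
 * Refresh an existing h->dev[] entry in place: RAID level, ioaccel handle
 * and raid map, queue depth and offload state are copied from the newly
 * discovered entry.
 */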
1346static void hpsa_scsi_update_entry(struct ctlr_info *h,
1347 int entry, struct hpsa_scsi_dev_t *new_entry)
1348{
1349
1350 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1351
1352
1353 h->dev[entry]->raid_level = new_entry->raid_level;
1354
1355
1356
1357
1358 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1359
1360
1361 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1362
1363
1364
1365
1366
1367
1368
1369
1370 h->dev[entry]->raid_map = new_entry->raid_map;
1371 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1372 }
1373 if (new_entry->offload_to_be_enabled) {
1374 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1375 wmb();
1376 }
1377 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1378 h->dev[entry]->offload_config = new_entry->offload_config;
1379 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1380 h->dev[entry]->queue_depth = new_entry->queue_depth;
1381
1382
1383
1384
1385
1386
1387 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1388
1389
1390
1391
1392 if (!new_entry->offload_to_be_enabled)
1393 h->dev[entry]->offload_enabled = 0;
1394
1395 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1396}
1397
1398
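/*
 * Replace the device at h->dev[entry] with new_entry, remembering the old
 * device in removed[] and the new one in added[] so the caller can fix up
 * the SCSI midlayer afterwards.
 */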
1399static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1400 int entry, struct hpsa_scsi_dev_t *new_entry,
1401 struct hpsa_scsi_dev_t *added[], int *nadded,
1402 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1403{
1404
1405 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1406 removed[*nremoved] = h->dev[entry];
1407 (*nremoved)++;
1408
1409
1410
1411
1412
1413 if (new_entry->target == -1) {
1414 new_entry->target = h->dev[entry]->target;
1415 new_entry->lun = h->dev[entry]->lun;
1416 }
1417
1418 h->dev[entry] = new_entry;
1419 added[*nadded] = new_entry;
1420 (*nadded)++;
1421
1422 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1423}
1424
1425
1426static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1427 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1428{
1429
1430 int i;
1431 struct hpsa_scsi_dev_t *sd;
1432
1433 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1434
1435 sd = h->dev[entry];
1436 removed[*nremoved] = h->dev[entry];
1437 (*nremoved)++;
1438
1439 for (i = entry; i < h->ndevices-1; i++)
1440 h->dev[i] = h->dev[i+1];
1441 h->ndevices--;
1442 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1443}
1444
1445#define SCSI3ADDR_EQ(a, b) ( \
1446 (a)[7] == (b)[7] && \
1447 (a)[6] == (b)[6] && \
1448 (a)[5] == (b)[5] && \
1449 (a)[4] == (b)[4] && \
1450 (a)[3] == (b)[3] && \
1451 (a)[2] == (b)[2] && \
1452 (a)[1] == (b)[1] && \
1453 (a)[0] == (b)[0])
1454
1455static void fixup_botched_add(struct ctlr_info *h,
1456 struct hpsa_scsi_dev_t *added)
1457{
	/*
	 * Called when scsi_add_device() fails so we can undo the entry we
	 * just added to h->dev[] and keep it consistent with the midlayer.
	 */
1461 unsigned long flags;
1462 int i, j;
1463
1464 spin_lock_irqsave(&h->lock, flags);
1465 for (i = 0; i < h->ndevices; i++) {
1466 if (h->dev[i] == added) {
1467 for (j = i; j < h->ndevices-1; j++)
1468 h->dev[j] = h->dev[j+1];
1469 h->ndevices--;
1470 break;
1471 }
1472 }
1473 spin_unlock_irqrestore(&h->lock, flags);
1474 kfree(added);
1475}
1476
1477static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1478 struct hpsa_scsi_dev_t *dev2)
1479{
	/*
	 * We compare everything except lun and target, as those are not
	 * yet assigned.  Compare the parts most likely to differ first.
	 */
1484 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1485 sizeof(dev1->scsi3addr)) != 0)
1486 return 0;
1487 if (memcmp(dev1->device_id, dev2->device_id,
1488 sizeof(dev1->device_id)) != 0)
1489 return 0;
1490 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1491 return 0;
1492 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1493 return 0;
1494 if (dev1->devtype != dev2->devtype)
1495 return 0;
1496 if (dev1->bus != dev2->bus)
1497 return 0;
1498 return 1;
1499}
1500
1501static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1502 struct hpsa_scsi_dev_t *dev2)
1503{
1504
1505
1506
1507
1508 if (dev1->raid_level != dev2->raid_level)
1509 return 1;
1510 if (dev1->offload_config != dev2->offload_config)
1511 return 1;
1512 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1513 return 1;
1514 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1515 if (dev1->queue_depth != dev2->queue_depth)
1516 return 1;
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1527 return 1;
1528 return 0;
1529}
1530
1531
1532
1533
1534
1535
1536
1537
1538
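/*
 * Look for 'needle' in 'haystack' by its 8-byte LUN address and report via
 * the return value whether it is absent, identical, changed in a way that
 * requires remove/re-add, or merely updated in place.
 */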
1539static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1540 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1541 int *index)
1542{
1543 int i;
1544#define DEVICE_NOT_FOUND 0
1545#define DEVICE_CHANGED 1
1546#define DEVICE_SAME 2
1547#define DEVICE_UPDATED 3
1548 if (needle == NULL)
1549 return DEVICE_NOT_FOUND;
1550
1551 for (i = 0; i < haystack_size; i++) {
1552 if (haystack[i] == NULL)
1553 continue;
1554 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1555 *index = i;
1556 if (device_is_the_same(needle, haystack[i])) {
1557 if (device_updated(needle, haystack[i]))
1558 return DEVICE_UPDATED;
1559 return DEVICE_SAME;
1560 } else {
1561
1562 if (needle->volume_offline)
1563 return DEVICE_NOT_FOUND;
1564 return DEVICE_CHANGED;
1565 }
1566 }
1567 }
1568 *index = -1;
1569 return DEVICE_NOT_FOUND;
1570}
1571
1572static void hpsa_monitor_offline_device(struct ctlr_info *h,
1573 unsigned char scsi3addr[])
1574{
1575 struct offline_device_entry *device;
1576 unsigned long flags;
1577
1578
1579 spin_lock_irqsave(&h->offline_device_lock, flags);
1580 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1581 if (memcmp(device->scsi3addr, scsi3addr,
1582 sizeof(device->scsi3addr)) == 0) {
1583 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1584 return;
1585 }
1586 }
1587 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1588
1589
1590 device = kmalloc(sizeof(*device), GFP_KERNEL);
1591 if (!device)
1592 return;
1593
1594 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1595 spin_lock_irqsave(&h->offline_device_lock, flags);
1596 list_add_tail(&device->offline_list, &h->offline_device_list);
1597 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1598}
1599
1600
1601static void hpsa_show_volume_status(struct ctlr_info *h,
1602 struct hpsa_scsi_dev_t *sd)
1603{
1604 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1605 dev_info(&h->pdev->dev,
1606 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1607 h->scsi_host->host_no,
1608 sd->bus, sd->target, sd->lun);
1609 switch (sd->volume_offline) {
1610 case HPSA_LV_OK:
1611 break;
1612 case HPSA_LV_UNDERGOING_ERASE:
1613 dev_info(&h->pdev->dev,
1614 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1615 h->scsi_host->host_no,
1616 sd->bus, sd->target, sd->lun);
1617 break;
1618 case HPSA_LV_NOT_AVAILABLE:
1619 dev_info(&h->pdev->dev,
1620 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1621 h->scsi_host->host_no,
1622 sd->bus, sd->target, sd->lun);
1623 break;
1624 case HPSA_LV_UNDERGOING_RPI:
1625 dev_info(&h->pdev->dev,
1626 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1627 h->scsi_host->host_no,
1628 sd->bus, sd->target, sd->lun);
1629 break;
1630 case HPSA_LV_PENDING_RPI:
1631 dev_info(&h->pdev->dev,
1632 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1633 h->scsi_host->host_no,
1634 sd->bus, sd->target, sd->lun);
1635 break;
1636 case HPSA_LV_ENCRYPTED_NO_KEY:
1637 dev_info(&h->pdev->dev,
1638 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1639 h->scsi_host->host_no,
1640 sd->bus, sd->target, sd->lun);
1641 break;
1642 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1643 dev_info(&h->pdev->dev,
1644 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1645 h->scsi_host->host_no,
1646 sd->bus, sd->target, sd->lun);
1647 break;
1648 case HPSA_LV_UNDERGOING_ENCRYPTION:
1649 dev_info(&h->pdev->dev,
1650 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1651 h->scsi_host->host_no,
1652 sd->bus, sd->target, sd->lun);
1653 break;
1654 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1655 dev_info(&h->pdev->dev,
1656 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1657 h->scsi_host->host_no,
1658 sd->bus, sd->target, sd->lun);
1659 break;
1660 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1661 dev_info(&h->pdev->dev,
1662 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1663 h->scsi_host->host_no,
1664 sd->bus, sd->target, sd->lun);
1665 break;
1666 case HPSA_LV_PENDING_ENCRYPTION:
1667 dev_info(&h->pdev->dev,
1668 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1669 h->scsi_host->host_no,
1670 sd->bus, sd->target, sd->lun);
1671 break;
1672 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1673 dev_info(&h->pdev->dev,
1674 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1675 h->scsi_host->host_no,
1676 sd->bus, sd->target, sd->lun);
1677 break;
1678 }
1679}
1680
1681
1682
1683
1684
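/*
 * Walk a logical drive's RAID map and record pointers to the member
 * physical disks.  The aggregate queue depth of the members becomes the
 * logical drive's queue depth; if any member is missing, ioaccel offload
 * is disabled for the volume.
 */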
1685static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1686 struct hpsa_scsi_dev_t *dev[], int ndevices,
1687 struct hpsa_scsi_dev_t *logical_drive)
1688{
1689 struct raid_map_data *map = &logical_drive->raid_map;
1690 struct raid_map_disk_data *dd = &map->data[0];
1691 int i, j;
1692 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1693 le16_to_cpu(map->metadata_disks_per_row);
1694 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1695 le16_to_cpu(map->layout_map_count) *
1696 total_disks_per_row;
1697 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1698 total_disks_per_row;
1699 int qdepth;
1700
1701 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1702 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1703
1704 logical_drive->nphysical_disks = nraid_map_entries;
1705
1706 qdepth = 0;
1707 for (i = 0; i < nraid_map_entries; i++) {
1708 logical_drive->phys_disk[i] = NULL;
1709 if (!logical_drive->offload_config)
1710 continue;
1711 for (j = 0; j < ndevices; j++) {
1712 if (dev[j] == NULL)
1713 continue;
1714 if (dev[j]->devtype != TYPE_DISK &&
1715 dev[j]->devtype != TYPE_ZBC)
1716 continue;
1717 if (is_logical_device(dev[j]))
1718 continue;
1719 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1720 continue;
1721
1722 logical_drive->phys_disk[i] = dev[j];
1723 if (i < nphys_disk)
1724 qdepth = min(h->nr_cmds, qdepth +
1725 logical_drive->phys_disk[i]->queue_depth);
1726 break;
1727 }
1728
1729
1730
1731
1732
1733
1734
1735
1736 if (!logical_drive->phys_disk[i]) {
1737 dev_warn(&h->pdev->dev,
1738 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1739 __func__,
1740 h->scsi_host->host_no, logical_drive->bus,
1741 logical_drive->target, logical_drive->lun);
1742 logical_drive->offload_enabled = 0;
1743 logical_drive->offload_to_be_enabled = 0;
1744 logical_drive->queue_depth = 8;
1745 }
1746 }
1747 if (nraid_map_entries)
1748
1749
1750
1751
1752 logical_drive->queue_depth = qdepth;
1753 else {
1754 if (logical_drive->external)
1755 logical_drive->queue_depth = EXTERNAL_QD;
1756 else
1757 logical_drive->queue_depth = h->nr_cmds;
1758 }
1759}
1760
1761static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1762 struct hpsa_scsi_dev_t *dev[], int ndevices)
1763{
1764 int i;
1765
1766 for (i = 0; i < ndevices; i++) {
1767 if (dev[i] == NULL)
1768 continue;
1769 if (dev[i]->devtype != TYPE_DISK &&
1770 dev[i]->devtype != TYPE_ZBC)
1771 continue;
1772 if (!is_logical_device(dev[i]))
1773 continue;
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1795 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1796 }
1797}
1798
1799static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1800{
1801 int rc = 0;
1802
1803 if (!h->scsi_host)
1804 return 1;
1805
1806 if (is_logical_device(device))
1807 rc = scsi_add_device(h->scsi_host, device->bus,
1808 device->target, device->lun);
1809 else
1810 rc = hpsa_add_sas_device(h->sas_host, device);
1811
1812 return rc;
1813}
1814
1815static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1816 struct hpsa_scsi_dev_t *dev)
1817{
1818 int i;
1819 int count = 0;
1820
1821 for (i = 0; i < h->nr_cmds; i++) {
1822 struct CommandList *c = h->cmd_pool + i;
1823 int refcount = atomic_inc_return(&c->refcount);
1824
1825 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1826 dev->scsi3addr)) {
1827 unsigned long flags;
1828
1829 spin_lock_irqsave(&h->lock, flags);
1830 if (!hpsa_is_cmd_idle(c))
1831 ++count;
1832 spin_unlock_irqrestore(&h->lock, flags);
1833 }
1834
1835 cmd_free(h, c);
1836 }
1837
1838 return count;
1839}
1840
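/*
 * Before removing a device, poll roughly once a second until no commands
 * remain outstanding against it, giving up after NUM_WAIT tries (longer
 * for external pass-through RAID devices).
 */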
1841#define NUM_WAIT 20
1842static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1843 struct hpsa_scsi_dev_t *device)
1844{
1845 int cmds = 0;
1846 int waits = 0;
1847 int num_wait = NUM_WAIT;
1848
1849 if (device->external)
1850 num_wait = HPSA_EH_PTRAID_TIMEOUT;
1851
1852 while (1) {
1853 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1854 if (cmds == 0)
1855 break;
1856 if (++waits > num_wait)
1857 break;
1858 msleep(1000);
1859 }
1860
1861 if (waits > num_wait) {
1862 dev_warn(&h->pdev->dev,
1863 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1864 __func__,
1865 h->scsi_host->host_no,
1866 device->bus, device->target, device->lun, cmds);
1867 }
1868}
1869
1870static void hpsa_remove_device(struct ctlr_info *h,
1871 struct hpsa_scsi_dev_t *device)
1872{
1873 struct scsi_device *sdev = NULL;
1874
1875 if (!h->scsi_host)
1876 return;
1877
1878
1879
1880
1881 device->removed = 1;
1882 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1883
1884 if (is_logical_device(device)) {
1885 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1886 device->target, device->lun);
1887 if (sdev) {
1888 scsi_remove_device(sdev);
1889 scsi_device_put(sdev);
1890 } else {
1891
1892
1893
1894
1895
1896 hpsa_show_dev_msg(KERN_WARNING, h, device,
1897 "didn't find device for removal.");
1898 }
1899 } else {
1900
1901 hpsa_remove_sas_device(device);
1902 }
1903}
1904
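/*
 * Reconcile the driver's h->dev[] table with the freshly discovered device
 * list sd[]: update or replace changed entries, remove devices that have
 * disappeared and register newly found ones with the SCSI midlayer or SAS
 * transport.  Skipped entirely while a controller reset is in progress.
 */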
1905static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1906 struct hpsa_scsi_dev_t *sd[], int nsds)
1907{
1908
1909
1910
1911
1912 int i, entry, device_change, changes = 0;
1913 struct hpsa_scsi_dev_t *csd;
1914 unsigned long flags;
1915 struct hpsa_scsi_dev_t **added, **removed;
1916 int nadded, nremoved;
1917
1918
1919
1920
1921
1922 spin_lock_irqsave(&h->reset_lock, flags);
1923 if (h->reset_in_progress) {
1924 h->drv_req_rescan = 1;
1925 spin_unlock_irqrestore(&h->reset_lock, flags);
1926 return;
1927 }
1928 spin_unlock_irqrestore(&h->reset_lock, flags);
1929
1930 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1931 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1932
1933 if (!added || !removed) {
1934 dev_warn(&h->pdev->dev, "out of memory in "
1935 "adjust_hpsa_scsi_table\n");
1936 goto free_and_out;
1937 }
1938
1939 spin_lock_irqsave(&h->devlock, flags);
1940
1941
1942
1943
1944
1945
1946
1947
1948 i = 0;
1949 nremoved = 0;
1950 nadded = 0;
1951 while (i < h->ndevices) {
1952 csd = h->dev[i];
1953 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1954 if (device_change == DEVICE_NOT_FOUND) {
1955 changes++;
1956 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1957 continue;
1958 } else if (device_change == DEVICE_CHANGED) {
1959 changes++;
1960 hpsa_scsi_replace_entry(h, i, sd[entry],
1961 added, &nadded, removed, &nremoved);
1962
1963
1964
1965 sd[entry] = NULL;
1966 } else if (device_change == DEVICE_UPDATED) {
1967 hpsa_scsi_update_entry(h, i, sd[entry]);
1968 }
1969 i++;
1970 }
1971
1972
1973
1974
1975
1976 for (i = 0; i < nsds; i++) {
1977 if (!sd[i])
1978 continue;
1979
1980
1981
1982
1983
1984
1985 if (sd[i]->volume_offline) {
1986 hpsa_show_volume_status(h, sd[i]);
1987 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1988 continue;
1989 }
1990
1991 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1992 h->ndevices, &entry);
1993 if (device_change == DEVICE_NOT_FOUND) {
1994 changes++;
1995 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1996 break;
1997 sd[i] = NULL;
1998 } else if (device_change == DEVICE_CHANGED) {
1999 /* should never happen... */
2000 changes++;
2001 dev_warn(&h->pdev->dev,
2002 "device unexpectedly changed.\n");
2003
2004 }
2005 }
2006 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2007
2008 /*
2009  * Now that h->dev[] and the logical-to-physical drive pointers
2010  * are consistent, enable ioaccel offload on any logical drives
2011  * that were waiting for it.  This is done while still holding
2012  * h->devlock, since h->dev[] is the list used to submit I/O.
2013  */
2014
2015
2016 for (i = 0; i < h->ndevices; i++) {
2017 if (h->dev[i] == NULL)
2018 continue;
2019 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2020 }
2021
2022 spin_unlock_irqrestore(&h->devlock, flags);
2023
2024 /* Monitor devices which are in one of several NOT READY states
2025  * to be brought online later.  This must be done without holding
2026  * h->devlock, so don't touch h->dev[] here.
2027  */
2028 for (i = 0; i < nsds; i++) {
2029 if (!sd[i])
2030 continue;
2031 if (sd[i]->volume_offline)
2032 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2033 }
2034
2035 /* Don't notify the SCSI mid layer of any changes the first time
2036  * through (or if there are no changes); scsi_scan_host will do
2037  * it later the first time through.
2038  */
2039 if (!changes)
2040 goto free_and_out;
2041
2042 /* Notify the SCSI mid layer of any removed devices */
2043 for (i = 0; i < nremoved; i++) {
2044 if (removed[i] == NULL)
2045 continue;
2046 if (removed[i]->expose_device)
2047 hpsa_remove_device(h, removed[i]);
2048 kfree(removed[i]);
2049 removed[i] = NULL;
2050 }
2051
2052 /* Notify the SCSI mid layer of any added devices */
2053 for (i = 0; i < nadded; i++) {
2054 int rc = 0;
2055
2056 if (added[i] == NULL)
2057 continue;
2058 if (!(added[i]->expose_device))
2059 continue;
2060 rc = hpsa_add_device(h, added[i]);
2061 if (!rc)
2062 continue;
2063 dev_warn(&h->pdev->dev,
2064 "addition failed %d, device not added.", rc);
2065 /* Now we have to remove it from h->dev[],
2066  * since it didn't get added to the SCSI mid layer.
2067  */
2068 fixup_botched_add(h, added[i]);
2069 h->drv_req_rescan = 1;
2070 }
2071
2072free_and_out:
2073 kfree(added);
2074 kfree(removed);
2075}
2076
2077 /*
2078  * Look up bus/target/lun and return the corresponding struct
2079  * hpsa_scsi_dev_t *.  Assumes h->devlock is held.
2080  */
2081static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2082 int bus, int target, int lun)
2083{
2084 int i;
2085 struct hpsa_scsi_dev_t *sd;
2086
2087 for (i = 0; i < h->ndevices; i++) {
2088 sd = h->dev[i];
2089 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2090 return sd;
2091 }
2092 return NULL;
2093}
2094
2095static int hpsa_slave_alloc(struct scsi_device *sdev)
2096{
2097 struct hpsa_scsi_dev_t *sd = NULL;
2098 unsigned long flags;
2099 struct ctlr_info *h;
2100
2101 h = sdev_to_hba(sdev);
2102 spin_lock_irqsave(&h->devlock, flags);
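/*
 * Physical devices are matched through their SAS rphy; anything not
 * found that way falls back to a bus/target/lun lookup below.
 */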
2103 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2104 struct scsi_target *starget;
2105 struct sas_rphy *rphy;
2106
2107 starget = scsi_target(sdev);
2108 rphy = target_to_rphy(starget);
2109 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2110 if (sd) {
2111 sd->target = sdev_id(sdev);
2112 sd->lun = sdev->lun;
2113 }
2114 }
2115 if (!sd)
2116 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2117 sdev_id(sdev), sdev->lun);
2118
2119 if (sd && sd->expose_device) {
2120 atomic_set(&sd->ioaccel_cmds_out, 0);
2121 sdev->hostdata = sd;
2122 } else
2123 sdev->hostdata = NULL;
2124 spin_unlock_irqrestore(&h->devlock, flags);
2125 return 0;
2126}
2127
2128
2129static int hpsa_slave_configure(struct scsi_device *sdev)
2130{
2131 struct hpsa_scsi_dev_t *sd;
2132 int queue_depth;
2133
2134 sd = sdev->hostdata;
2135 sdev->no_uld_attach = !sd || !sd->expose_device;
2136
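/*
 * External (pass-through RAID) devices get a fixed queue depth and a
 * longer error-handling timeout; other devices use their reported
 * queue depth, falling back to the host's can_queue.
 */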
2137 if (sd) {
2138 sd->was_removed = 0;
2139 if (sd->external) {
2140 queue_depth = EXTERNAL_QD;
2141 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2142 blk_queue_rq_timeout(sdev->request_queue,
2143 HPSA_EH_PTRAID_TIMEOUT);
2144 } else {
2145 queue_depth = sd->queue_depth != 0 ?
2146 sd->queue_depth : sdev->host->can_queue;
2147 }
2148 } else
2149 queue_depth = sdev->host->can_queue;
2150
2151 scsi_change_queue_depth(sdev, queue_depth);
2152
2153 return 0;
2154}
2155
2156static void hpsa_slave_destroy(struct scsi_device *sdev)
2157{
2158 struct hpsa_scsi_dev_t *hdev = NULL;
2159
2160 hdev = sdev->hostdata;
2161
2162 if (hdev)
2163 hdev->was_removed = 1;
2164}
2165
2166static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2167{
2168 int i;
2169
2170 if (!h->ioaccel2_cmd_sg_list)
2171 return;
2172 for (i = 0; i < h->nr_cmds; i++) {
2173 kfree(h->ioaccel2_cmd_sg_list[i]);
2174 h->ioaccel2_cmd_sg_list[i] = NULL;
2175 }
2176 kfree(h->ioaccel2_cmd_sg_list);
2177 h->ioaccel2_cmd_sg_list = NULL;
2178}
2179
2180static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2181{
2182 int i;
2183
2184 if (h->chainsize <= 0)
2185 return 0;
2186
2187 h->ioaccel2_cmd_sg_list =
2188 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2189 GFP_KERNEL);
2190 if (!h->ioaccel2_cmd_sg_list)
2191 return -ENOMEM;
2192 for (i = 0; i < h->nr_cmds; i++) {
2193 h->ioaccel2_cmd_sg_list[i] =
2194 kmalloc_array(h->maxsgentries,
2195 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2196 GFP_KERNEL);
2197 if (!h->ioaccel2_cmd_sg_list[i])
2198 goto clean;
2199 }
2200 return 0;
2201
2202clean:
2203 hpsa_free_ioaccel2_sg_chain_blocks(h);
2204 return -ENOMEM;
2205}
2206
2207static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2208{
2209 int i;
2210
2211 if (!h->cmd_sg_list)
2212 return;
2213 for (i = 0; i < h->nr_cmds; i++) {
2214 kfree(h->cmd_sg_list[i]);
2215 h->cmd_sg_list[i] = NULL;
2216 }
2217 kfree(h->cmd_sg_list);
2218 h->cmd_sg_list = NULL;
2219}
2220
2221static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2222{
2223 int i;
2224
2225 if (h->chainsize <= 0)
2226 return 0;
2227
2228 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2229 GFP_KERNEL);
2230 if (!h->cmd_sg_list)
2231 return -ENOMEM;
2232
2233 for (i = 0; i < h->nr_cmds; i++) {
2234 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2235 sizeof(*h->cmd_sg_list[i]),
2236 GFP_KERNEL);
2237 if (!h->cmd_sg_list[i])
2238 goto clean;
2239
2240 }
2241 return 0;
2242
2243clean:
2244 hpsa_free_sg_chain_blocks(h);
2245 return -ENOMEM;
2246}
2247
2248static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2249 struct io_accel2_cmd *cp, struct CommandList *c)
2250{
2251 struct ioaccel2_sg_element *chain_block;
2252 u64 temp64;
2253 u32 chain_size;
2254
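/*
 * Map the per-command ioaccel2 SG chain block; sg[0].length holds the
 * number of bytes of chained SG entries to map.
 */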
2255 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2256 chain_size = le32_to_cpu(cp->sg[0].length);
2257 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2258 DMA_TO_DEVICE);
2259 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2260
2261 cp->sg->address = 0;
2262 return -1;
2263 }
2264 cp->sg->address = cpu_to_le64(temp64);
2265 return 0;
2266}
2267
2268static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2269 struct io_accel2_cmd *cp)
2270{
2271 struct ioaccel2_sg_element *chain_sg;
2272 u64 temp64;
2273 u32 chain_size;
2274
2275 chain_sg = cp->sg;
2276 temp64 = le64_to_cpu(chain_sg->address);
2277 chain_size = le32_to_cpu(cp->sg[0].length);
2278 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2279}
2280
2281static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2282 struct CommandList *c)
2283{
2284 struct SGDescriptor *chain_sg, *chain_block;
2285 u64 temp64;
2286 u32 chain_len;
2287
2288 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2289 chain_block = h->cmd_sg_list[c->cmdindex];
2290 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2291 chain_len = sizeof(*chain_sg) *
2292 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2293 chain_sg->Len = cpu_to_le32(chain_len);
2294 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2295 DMA_TO_DEVICE);
2296 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2297
2298 chain_sg->Addr = cpu_to_le64(0);
2299 return -1;
2300 }
2301 chain_sg->Addr = cpu_to_le64(temp64);
2302 return 0;
2303}
2304
2305static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2306 struct CommandList *c)
2307{
2308 struct SGDescriptor *chain_sg;
2309
2310 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2311 return;
2312
2313 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2314 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2315 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2316}
2317
2318
2319 /* Decode the various types of errors on the ioaccel2 path.
2320  * Return 1 for any error that should generate a RAID path retry.
2321  * Return 0 for errors that don't require a RAID path retry.
2322  */
2323static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2324 struct CommandList *c,
2325 struct scsi_cmnd *cmd,
2326 struct io_accel2_cmd *c2,
2327 struct hpsa_scsi_dev_t *dev)
2328{
2329 int data_len;
2330 int retry = 0;
2331 u32 ioaccel2_resid = 0;
2332
2333 switch (c2->error_data.serv_response) {
2334 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2335 switch (c2->error_data.status) {
2336 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2337 if (cmd)
2338 cmd->result = 0;
2339 break;
2340 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2341 cmd->result |= SAM_STAT_CHECK_CONDITION;
2342 if (c2->error_data.data_present !=
2343 IOACCEL2_SENSE_DATA_PRESENT) {
2344 memset(cmd->sense_buffer, 0,
2345 SCSI_SENSE_BUFFERSIZE);
2346 break;
2347 }
2348
2349 data_len = c2->error_data.sense_data_len;
2350 if (data_len > SCSI_SENSE_BUFFERSIZE)
2351 data_len = SCSI_SENSE_BUFFERSIZE;
2352 if (data_len > sizeof(c2->error_data.sense_data_buff))
2353 data_len =
2354 sizeof(c2->error_data.sense_data_buff);
2355 memcpy(cmd->sense_buffer,
2356 c2->error_data.sense_data_buff, data_len);
2357 retry = 1;
2358 break;
2359 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2360 retry = 1;
2361 break;
2362 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2363 retry = 1;
2364 break;
2365 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2366 retry = 1;
2367 break;
2368 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2369 retry = 1;
2370 break;
2371 default:
2372 retry = 1;
2373 break;
2374 }
2375 break;
2376 case IOACCEL2_SERV_RESPONSE_FAILURE:
2377 switch (c2->error_data.status) {
2378 case IOACCEL2_STATUS_SR_IO_ERROR:
2379 case IOACCEL2_STATUS_SR_IO_ABORTED:
2380 case IOACCEL2_STATUS_SR_OVERRUN:
2381 retry = 1;
2382 break;
2383 case IOACCEL2_STATUS_SR_UNDERRUN:
2384 cmd->result = (DID_OK << 16);
2385 cmd->result |= (COMMAND_COMPLETE << 8);
2386 ioaccel2_resid = get_unaligned_le32(
2387 &c2->error_data.resid_cnt[0]);
2388 scsi_set_resid(cmd, ioaccel2_resid);
2389 break;
2390 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2391 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2392 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2393 /*
2394  * Did an HBA disk disappear?  We will eventually get a state
2395  * change event from the controller, but in the meantime tell
2396  * the OS that the disk is gone and stop sending I/O to it.
2397  * This also lets a re-inserted disk come back with the same
2398  * device node.
2399  */
2400
2401 if (dev->physical_device && dev->expose_device) {
2402 cmd->result = DID_NO_CONNECT << 16;
2403 dev->removed = 1;
2404 h->drv_req_rescan = 1;
2405 dev_warn(&h->pdev->dev,
2406 "%s: device is gone!\n", __func__);
2407 } else
2408
2409 /*
2410  * Otherwise retry the command down the normal RAID path.
2411  */
2412
2413 retry = 1;
2414 break;
2415 default:
2416 retry = 1;
2417 }
2418 break;
2419 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2420 break;
2421 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2422 break;
2423 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2424 retry = 1;
2425 break;
2426 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2427 break;
2428 default:
2429 retry = 1;
2430 break;
2431 }
2432
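/* Never retry a command for a device with a reset in progress. */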
2433 if (dev->in_reset)
2434 retry = 0;
2435
2436 return retry;
2437}
2438
2439static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2440 struct CommandList *c)
2441{
2442 struct hpsa_scsi_dev_t *dev = c->device;
2443
2444
2445 /*
2446  * Mark the command idle before checking for a waiting reset
2447  * handler, so the handler sees this command as completed.
2448  */
2449 c->scsi_cmd = SCSI_CMD_IDLE;
2450 mb();
2451 if (dev) {
2452 atomic_dec(&dev->commands_outstanding);
2453 if (dev->in_reset &&
2454 atomic_read(&dev->commands_outstanding) <= 0)
2455 wake_up_all(&h->event_sync_wait_queue);
2456 }
2457}
2458
2459static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2460 struct CommandList *c)
2461{
2462 hpsa_cmd_resolve_events(h, c);
2463 cmd_tagged_free(h, c);
2464}
2465
2466static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2467 struct CommandList *c, struct scsi_cmnd *cmd)
2468{
2469 hpsa_cmd_resolve_and_free(h, c);
2470 if (cmd && cmd->scsi_done)
2471 cmd->scsi_done(cmd);
2472}
2473
2474static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2475{
2476 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2477 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2478}
2479
2480static void process_ioaccel2_completion(struct ctlr_info *h,
2481 struct CommandList *c, struct scsi_cmnd *cmd,
2482 struct hpsa_scsi_dev_t *dev)
2483{
2484 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2485
2486
2487 if (likely(c2->error_data.serv_response == 0 &&
2488 c2->error_data.status == 0)) {
2489 cmd->result = 0;
2490 return hpsa_cmd_free_and_done(h, c, cmd);
2491 }
2492
2493 /*
2494  * Any RAID offload error on a logical volume results in a retry
2495  * down the normal I/O path, so the controller can handle
2496  * whatever is wrong.
2497  */
2498 if (is_logical_device(dev) &&
2499 c2->error_data.serv_response ==
2500 IOACCEL2_SERV_RESPONSE_FAILURE) {
2501 if (c2->error_data.status ==
2502 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2503 dev->offload_enabled = 0;
2504 dev->offload_to_be_enabled = 0;
2505 }
2506
2507 if (dev->in_reset) {
2508 cmd->result = DID_RESET << 16;
2509 return hpsa_cmd_free_and_done(h, c, cmd);
2510 }
2511
2512 return hpsa_retry_cmd(h, c);
2513 }
2514
2515 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2516 return hpsa_retry_cmd(h, c);
2517
2518 return hpsa_cmd_free_and_done(h, c, cmd);
2519}
2520
2521
2522static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2523 struct CommandList *cp)
2524{
2525 u8 tmf_status = cp->err_info->ScsiStatus;
2526
2527 switch (tmf_status) {
2528 case CISS_TMF_COMPLETE:
2529
2530 /*
2531  * A completed TMF is treated the same as a successful one.
2532  */
2533 case CISS_TMF_SUCCESS:
2534 return 0;
2535 case CISS_TMF_INVALID_FRAME:
2536 case CISS_TMF_NOT_SUPPORTED:
2537 case CISS_TMF_FAILED:
2538 case CISS_TMF_WRONG_LUN:
2539 case CISS_TMF_OVERLAPPED_TAG:
2540 break;
2541 default:
2542 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2543 tmf_status);
2544 break;
2545 }
2546 return -tmf_status;
2547}
2548
2549static void complete_scsi_command(struct CommandList *cp)
2550{
2551 struct scsi_cmnd *cmd;
2552 struct ctlr_info *h;
2553 struct ErrorInfo *ei;
2554 struct hpsa_scsi_dev_t *dev;
2555 struct io_accel2_cmd *c2;
2556
2557 u8 sense_key;
2558 u8 asc;
2559 u8 ascq;
2560 unsigned long sense_data_size;
2561
2562 ei = cp->err_info;
2563 cmd = cp->scsi_cmd;
2564 h = cp->h;
2565
2566 if (!cmd->device) {
2567 cmd->result = DID_NO_CONNECT << 16;
2568 return hpsa_cmd_free_and_done(h, cp, cmd);
2569 }
2570
2571 dev = cmd->device->hostdata;
2572 if (!dev) {
2573 cmd->result = DID_NO_CONNECT << 16;
2574 return hpsa_cmd_free_and_done(h, cp, cmd);
2575 }
2576 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2577
2578 scsi_dma_unmap(cmd);
2579 if ((cp->cmd_type == CMD_SCSI) &&
2580 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2581 hpsa_unmap_sg_chain_block(h, cp);
2582
2583 if ((cp->cmd_type == CMD_IOACCEL2) &&
2584 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2585 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2586
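/* Assume success until an error status below says otherwise. */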
2587 cmd->result = (DID_OK << 16);
2588 cmd->result |= (COMMAND_COMPLETE << 8);
2589
2590
2591 if (dev->was_removed) {
2592 hpsa_cmd_resolve_and_free(h, cp);
2593 return;
2594 }
2595
2596 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2597 if (dev->physical_device && dev->expose_device &&
2598 dev->removed) {
2599 cmd->result = DID_NO_CONNECT << 16;
2600 return hpsa_cmd_free_and_done(h, cp, cmd);
2601 }
2602 if (likely(cp->phys_disk != NULL))
2603 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2604 }
2605
2606 /*
2607  * A controller lockup flags outstanding commands with
2608  * CMD_CTLR_LOCKUP; report them as DID_NO_CONNECT so the
2609  * mid layer does not retry them.
2610  */
2611 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2612
2613 cmd->result = DID_NO_CONNECT << 16;
2614 return hpsa_cmd_free_and_done(h, cp, cmd);
2615 }
2616
2617 if (cp->cmd_type == CMD_IOACCEL2)
2618 return process_ioaccel2_completion(h, cp, cmd, dev);
2619
2620 scsi_set_resid(cmd, ei->ResidualCnt);
2621 if (ei->CommandStatus == 0)
2622 return hpsa_cmd_free_and_done(h, cp, cmd);
2623
2624 /* For ioaccel1 commands, copy a few fields over to the normal
2625  * CISS header so the error handling below can use them.
2626  */
2627 if (cp->cmd_type == CMD_IOACCEL1) {
2628 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2629 cp->Header.SGList = scsi_sg_count(cmd);
2630 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2631 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2632 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2633 cp->Header.tag = c->tag;
2634 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2635 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2636
2637 /* As on the ioaccel2 path, any offload error on a logical
2638  * volume is retried down the normal RAID path, disabling
2639  * offload first if the controller asked for it.
2640  */
2641 if (is_logical_device(dev)) {
2642 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2643 dev->offload_enabled = 0;
2644 return hpsa_retry_cmd(h, cp);
2645 }
2646 }
2647
2648
2649 switch (ei->CommandStatus) {
2650
2651 case CMD_TARGET_STATUS:
2652 cmd->result |= ei->ScsiStatus;
2653
2654 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2655 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2656 else
2657 sense_data_size = sizeof(ei->SenseInfo);
2658 if (ei->SenseLen < sense_data_size)
2659 sense_data_size = ei->SenseLen;
2660 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2661 if (ei->ScsiStatus)
2662 decode_sense_data(ei->SenseInfo, sense_data_size,
2663 &sense_key, &asc, &ascq);
2664 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2665 switch (sense_key) {
2666 case ABORTED_COMMAND:
2667 cmd->result |= DID_SOFT_ERROR << 16;
2668 break;
2669 case UNIT_ATTENTION:
2670 if (asc == 0x3F && ascq == 0x0E)
2671 h->drv_req_rescan = 1;
2672 break;
2673 case ILLEGAL_REQUEST:
2674 if (asc == 0x25 && ascq == 0x00) {
2675 dev->removed = 1;
2676 cmd->result = DID_NO_CONNECT << 16;
2677 }
2678 break;
2679 }
2680 break;
2681 }
2682 /* The problem wasn't a check condition; pass whatever SCSI
2683  * status we got up to the upper layers.
2684  */
2685 if (ei->ScsiStatus) {
2686 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2687 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2688 "Returning result: 0x%x\n",
2689 cp, ei->ScsiStatus,
2690 sense_key, asc, ascq,
2691 cmd->result);
2692 } else {
2693 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2694 "Returning no connection.\n", cp),
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708 cmd->result = DID_NO_CONNECT << 16;
2709 }
2710 break;
2711
2712 case CMD_DATA_UNDERRUN:
2713 break;
2714 case CMD_DATA_OVERRUN:
2715 dev_warn(&h->pdev->dev,
2716 "CDB %16phN data overrun\n", cp->Request.CDB);
2717 break;
2718 case CMD_INVALID: {
2719 /*
2720  * CMD_INVALID is returned when a non-existent device is
2721  * addressed, rather than a selection timeout (no response),
2722  * so treat it as if the device is no longer present.
2723  */
2727 cmd->result = DID_NO_CONNECT << 16;
2728 }
2729 break;
2730 case CMD_PROTOCOL_ERR:
2731 cmd->result = DID_ERROR << 16;
2732 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2733 cp->Request.CDB);
2734 break;
2735 case CMD_HARDWARE_ERR:
2736 cmd->result = DID_ERROR << 16;
2737 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2738 cp->Request.CDB);
2739 break;
2740 case CMD_CONNECTION_LOST:
2741 cmd->result = DID_ERROR << 16;
2742 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2743 cp->Request.CDB);
2744 break;
2745 case CMD_ABORTED:
2746 cmd->result = DID_ABORT << 16;
2747 break;
2748 case CMD_ABORT_FAILED:
2749 cmd->result = DID_ERROR << 16;
2750 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2751 cp->Request.CDB);
2752 break;
2753 case CMD_UNSOLICITED_ABORT:
2754 cmd->result = DID_SOFT_ERROR << 16;
2755 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2756 cp->Request.CDB);
2757 break;
2758 case CMD_TIMEOUT:
2759 cmd->result = DID_TIME_OUT << 16;
2760 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2761 cp->Request.CDB);
2762 break;
2763 case CMD_UNABORTABLE:
2764 cmd->result = DID_ERROR << 16;
2765 dev_warn(&h->pdev->dev, "Command unabortable\n");
2766 break;
2767 case CMD_TMF_STATUS:
2768 if (hpsa_evaluate_tmf_status(h, cp))
2769 cmd->result = DID_ERROR << 16;
2770 break;
2771 case CMD_IOACCEL_DISABLED:
2772 /* This only handles the direct pass-through case, since RAID
2773  * offload errors on logical volumes are handled above; just retry.
2774  */
2775 cmd->result = DID_SOFT_ERROR << 16;
2776 dev_warn(&h->pdev->dev,
2777 "cp %p had HP SSD Smart Path error\n", cp);
2778 break;
2779 default:
2780 cmd->result = DID_ERROR << 16;
2781 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2782 cp, ei->CommandStatus);
2783 }
2784
2785 return hpsa_cmd_free_and_done(h, cp, cmd);
2786}
2787
2788static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2789 int sg_used, enum dma_data_direction data_direction)
2790{
2791 int i;
2792
2793 for (i = 0; i < sg_used; i++)
2794 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2795 le32_to_cpu(c->SG[i].Len),
2796 data_direction);
2797}
2798
2799static int hpsa_map_one(struct pci_dev *pdev,
2800 struct CommandList *cp,
2801 unsigned char *buf,
2802 size_t buflen,
2803 enum dma_data_direction data_direction)
2804{
2805 u64 addr64;
2806
2807 if (buflen == 0 || data_direction == DMA_NONE) {
2808 cp->Header.SGList = 0;
2809 cp->Header.SGTotal = cpu_to_le16(0);
2810 return 0;
2811 }
2812
2813 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2814 if (dma_mapping_error(&pdev->dev, addr64)) {
2815
2816 cp->Header.SGList = 0;
2817 cp->Header.SGTotal = cpu_to_le16(0);
2818 return -1;
2819 }
2820 cp->SG[0].Addr = cpu_to_le64(addr64);
2821 cp->SG[0].Len = cpu_to_le32(buflen);
2822 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
2823 cp->Header.SGList = 1;
2824 cp->Header.SGTotal = cpu_to_le16(1);
2825 return 0;
2826}
2827
2828#define NO_TIMEOUT ((unsigned long) -1)
2829#define DEFAULT_TIMEOUT 30000
2830static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2831 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2832{
2833 DECLARE_COMPLETION_ONSTACK(wait);
2834
2835 c->waiting = &wait;
2836 __enqueue_cmd_and_start_io(h, c, reply_queue);
2837 if (timeout_msecs == NO_TIMEOUT) {
2838
2839 wait_for_completion_io(&wait);
2840 return IO_OK;
2841 }
2842 if (!wait_for_completion_io_timeout(&wait,
2843 msecs_to_jiffies(timeout_msecs))) {
2844 dev_warn(&h->pdev->dev, "Command timed out.\n");
2845 return -ETIMEDOUT;
2846 }
2847 return IO_OK;
2848}
2849
2850static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2851 int reply_queue, unsigned long timeout_msecs)
2852{
2853 if (unlikely(lockup_detected(h))) {
2854 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2855 return IO_OK;
2856 }
2857 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2858}
2859
2860static u32 lockup_detected(struct ctlr_info *h)
2861{
2862 int cpu;
2863 u32 rc, *lockup_detected;
2864
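/* Peek at this CPU's copy of the per-cpu lockup flag. */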
2865 cpu = get_cpu();
2866 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2867 rc = *lockup_detected;
2868 put_cpu();
2869 return rc;
2870}
2871
2872#define MAX_DRIVER_CMD_RETRIES 25
2873static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2874 struct CommandList *c, enum dma_data_direction data_direction,
2875 unsigned long timeout_msecs)
2876{
2877 int backoff_time = 10, retry_count = 0;
2878 int rc;
2879
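/*
 * Retry while the target reports unit attention or busy, backing off
 * exponentially (capped at one second) after the first few attempts.
 */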
2880 do {
2881 memset(c->err_info, 0, sizeof(*c->err_info));
2882 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2883 timeout_msecs);
2884 if (rc)
2885 break;
2886 retry_count++;
2887 if (retry_count > 3) {
2888 msleep(backoff_time);
2889 if (backoff_time < 1000)
2890 backoff_time *= 2;
2891 }
2892 } while ((check_for_unit_attention(h, c) ||
2893 check_for_busy(h, c)) &&
2894 retry_count <= MAX_DRIVER_CMD_RETRIES);
2895 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2896 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2897 rc = -EIO;
2898 return rc;
2899}
2900
2901static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2902 struct CommandList *c)
2903{
2904 const u8 *cdb = c->Request.CDB;
2905 const u8 *lun = c->Header.LUN.LunAddrBytes;
2906
2907 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2908 txt, lun, cdb);
2909}
2910
2911static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2912 struct CommandList *cp)
2913{
2914 const struct ErrorInfo *ei = cp->err_info;
2915 struct device *d = &cp->h->pdev->dev;
2916 u8 sense_key, asc, ascq;
2917 int sense_len;
2918
2919 switch (ei->CommandStatus) {
2920 case CMD_TARGET_STATUS:
2921 if (ei->SenseLen > sizeof(ei->SenseInfo))
2922 sense_len = sizeof(ei->SenseInfo);
2923 else
2924 sense_len = ei->SenseLen;
2925 decode_sense_data(ei->SenseInfo, sense_len,
2926 &sense_key, &asc, &ascq);
2927 hpsa_print_cmd(h, "SCSI status", cp);
2928 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2929 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2930 sense_key, asc, ascq);
2931 else
2932 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2933 if (ei->ScsiStatus == 0)
2934 dev_warn(d, "SCSI status is abnormally zero. "
2935 "(probably indicates selection timeout "
2936 "reported incorrectly due to a known "
2937 "firmware bug, circa July, 2001.)\n");
2938 break;
2939 case CMD_DATA_UNDERRUN:
2940 break;
2941 case CMD_DATA_OVERRUN:
2942 hpsa_print_cmd(h, "overrun condition", cp);
2943 break;
2944 case CMD_INVALID: {
2945
2946
2947
2948 hpsa_print_cmd(h, "invalid command", cp);
2949 dev_warn(d, "probably means device no longer present\n");
2950 }
2951 break;
2952 case CMD_PROTOCOL_ERR:
2953 hpsa_print_cmd(h, "protocol error", cp);
2954 break;
2955 case CMD_HARDWARE_ERR:
2956 hpsa_print_cmd(h, "hardware error", cp);
2957 break;
2958 case CMD_CONNECTION_LOST:
2959 hpsa_print_cmd(h, "connection lost", cp);
2960 break;
2961 case CMD_ABORTED:
2962 hpsa_print_cmd(h, "aborted", cp);
2963 break;
2964 case CMD_ABORT_FAILED:
2965 hpsa_print_cmd(h, "abort failed", cp);
2966 break;
2967 case CMD_UNSOLICITED_ABORT:
2968 hpsa_print_cmd(h, "unsolicited abort", cp);
2969 break;
2970 case CMD_TIMEOUT:
2971 hpsa_print_cmd(h, "timed out", cp);
2972 break;
2973 case CMD_UNABORTABLE:
2974 hpsa_print_cmd(h, "unabortable", cp);
2975 break;
2976 case CMD_CTLR_LOCKUP:
2977 hpsa_print_cmd(h, "controller lockup detected", cp);
2978 break;
2979 default:
2980 hpsa_print_cmd(h, "unknown status", cp);
2981 dev_warn(d, "Unknown command status %x\n",
2982 ei->CommandStatus);
2983 }
2984}
2985
2986static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2987 u8 page, u8 *buf, size_t bufsize)
2988{
2989 int rc = IO_OK;
2990 struct CommandList *c;
2991 struct ErrorInfo *ei;
2992
2993 c = cmd_alloc(h);
2994 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2995 page, scsi3addr, TYPE_CMD)) {
2996 rc = -1;
2997 goto out;
2998 }
2999 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3000 NO_TIMEOUT);
3001 if (rc)
3002 goto out;
3003 ei = c->err_info;
3004 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3005 hpsa_scsi_interpret_error(h, c);
3006 rc = -1;
3007 }
3008out:
3009 cmd_free(h, c);
3010 return rc;
3011}
3012
3013static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3014 u8 *scsi3addr)
3015{
3016 u8 *buf;
3017 u64 sa = 0;
3018 int rc = 0;
3019
3020 buf = kzalloc(1024, GFP_KERNEL);
3021 if (!buf)
3022 return 0;
3023
3024 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3025 buf, 1024);
3026
3027 if (rc)
3028 goto out;
3029
3030 sa = get_unaligned_be64(buf+12);
3031
3032out:
3033 kfree(buf);
3034 return sa;
3035}
3036
3037static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3038 u16 page, unsigned char *buf,
3039 unsigned char bufsize)
3040{
3041 int rc = IO_OK;
3042 struct CommandList *c;
3043 struct ErrorInfo *ei;
3044
3045 c = cmd_alloc(h);
3046
3047 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3048 page, scsi3addr, TYPE_CMD)) {
3049 rc = -1;
3050 goto out;
3051 }
3052 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3053 NO_TIMEOUT);
3054 if (rc)
3055 goto out;
3056 ei = c->err_info;
3057 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3058 hpsa_scsi_interpret_error(h, c);
3059 rc = -1;
3060 }
3061out:
3062 cmd_free(h, c);
3063 return rc;
3064}
3065
3066static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3067 u8 reset_type, int reply_queue)
3068{
3069 int rc = IO_OK;
3070 struct CommandList *c;
3071 struct ErrorInfo *ei;
3072
3073 c = cmd_alloc(h);
3074 c->device = dev;
3075
3076
3077 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3078 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3079 if (rc) {
3080 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3081 goto out;
3082 }
3083
3084
3085 ei = c->err_info;
3086 if (ei->CommandStatus != 0) {
3087 hpsa_scsi_interpret_error(h, c);
3088 rc = -1;
3089 }
3090out:
3091 cmd_free(h, c);
3092 return rc;
3093}
3094
3095static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3096 struct hpsa_scsi_dev_t *dev,
3097 unsigned char *scsi3addr)
3098{
3099 int i;
3100 bool match = false;
3101 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3102 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3103
3104 if (hpsa_is_cmd_idle(c))
3105 return false;
3106
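/*
 * RAID-path and ioctl commands are matched by LUN address; ioaccel
 * commands are matched by the backing physical disk(s).
 */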
3107 switch (c->cmd_type) {
3108 case CMD_SCSI:
3109 case CMD_IOCTL_PEND:
3110 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3111 sizeof(c->Header.LUN.LunAddrBytes));
3112 break;
3113
3114 case CMD_IOACCEL1:
3115 case CMD_IOACCEL2:
3116 if (c->phys_disk == dev) {
3117
3118 match = true;
3119 } else {
3120
3121
3122
3123
3124 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3125
3126
3127
3128
3129 match = dev->phys_disk[i] == c->phys_disk;
3130 }
3131 }
3132 break;
3133
3134 case IOACCEL2_TMF:
3135 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3136 match = dev->phys_disk[i]->ioaccel_handle ==
3137 le32_to_cpu(ac->it_nexus);
3138 }
3139 break;
3140
3141 case 0:
3142 match = false;
3143 break;
3144
3145 default:
3146 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3147 c->cmd_type);
3148 BUG();
3149 }
3150
3151 return match;
3152}
3153
3154static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3155 u8 reset_type, int reply_queue)
3156{
3157 int rc = 0;
3158
3159
3160 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3161 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3162 return -EINTR;
3163 }
3164
3165 rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3166 if (!rc) {
3167
3168 atomic_dec(&dev->commands_outstanding);
3169 wait_event(h->event_sync_wait_queue,
3170 atomic_read(&dev->commands_outstanding) <= 0 ||
3171 lockup_detected(h));
3172 }
3173
3174 if (unlikely(lockup_detected(h))) {
3175 dev_warn(&h->pdev->dev,
3176 "Controller lockup detected during reset wait\n");
3177 rc = -ENODEV;
3178 }
3179
3180 if (!rc)
3181 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3182
3183 mutex_unlock(&h->reset_mutex);
3184 return rc;
3185}
3186
3187static void hpsa_get_raid_level(struct ctlr_info *h,
3188 unsigned char *scsi3addr, unsigned char *raid_level)
3189{
3190 int rc;
3191 unsigned char *buf;
3192
3193 *raid_level = RAID_UNKNOWN;
3194 buf = kzalloc(64, GFP_KERNEL);
3195 if (!buf)
3196 return;
3197
3198 if (!hpsa_vpd_page_supported(h, scsi3addr,
3199 HPSA_VPD_LV_DEVICE_GEOMETRY))
3200 goto exit;
3201
3202 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3203 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3204
3205 if (rc == 0)
3206 *raid_level = buf[8];
3207 if (*raid_level > RAID_UNKNOWN)
3208 *raid_level = RAID_UNKNOWN;
3209exit:
3210 kfree(buf);
3211 return;
3212}
3213
3214#define HPSA_MAP_DEBUG
3215#ifdef HPSA_MAP_DEBUG
3216static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3217 struct raid_map_data *map_buff)
3218{
3219 struct raid_map_disk_data *dd = &map_buff->data[0];
3220 int map, row, col;
3221 u16 map_cnt, row_cnt, disks_per_row;
3222
3223 if (rc != 0)
3224 return;
3225
3226
3227 if (h->raid_offload_debug < 2)
3228 return;
3229
3230 dev_info(&h->pdev->dev, "structure_size = %u\n",
3231 le32_to_cpu(map_buff->structure_size));
3232 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3233 le32_to_cpu(map_buff->volume_blk_size));
3234 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3235 le64_to_cpu(map_buff->volume_blk_cnt));
3236 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3237 map_buff->phys_blk_shift);
3238 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3239 map_buff->parity_rotation_shift);
3240 dev_info(&h->pdev->dev, "strip_size = %u\n",
3241 le16_to_cpu(map_buff->strip_size));
3242 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3243 le64_to_cpu(map_buff->disk_starting_blk));
3244 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3245 le64_to_cpu(map_buff->disk_blk_cnt));
3246 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3247 le16_to_cpu(map_buff->data_disks_per_row));
3248 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3249 le16_to_cpu(map_buff->metadata_disks_per_row));
3250 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3251 le16_to_cpu(map_buff->row_cnt));
3252 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3253 le16_to_cpu(map_buff->layout_map_count));
3254 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3255 le16_to_cpu(map_buff->flags));
3256 dev_info(&h->pdev->dev, "encryption = %s\n",
3257 le16_to_cpu(map_buff->flags) &
3258 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3259 dev_info(&h->pdev->dev, "dekindex = %u\n",
3260 le16_to_cpu(map_buff->dekindex));
3261 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3262 for (map = 0; map < map_cnt; map++) {
3263 dev_info(&h->pdev->dev, "Map%u:\n", map);
3264 row_cnt = le16_to_cpu(map_buff->row_cnt);
3265 for (row = 0; row < row_cnt; row++) {
3266 dev_info(&h->pdev->dev, " Row%u:\n", row);
3267 disks_per_row =
3268 le16_to_cpu(map_buff->data_disks_per_row);
3269 for (col = 0; col < disks_per_row; col++, dd++)
3270 dev_info(&h->pdev->dev,
3271 " D%02u: h=0x%04x xor=%u,%u\n",
3272 col, dd->ioaccel_handle,
3273 dd->xor_mult[0], dd->xor_mult[1]);
3274 disks_per_row =
3275 le16_to_cpu(map_buff->metadata_disks_per_row);
3276 for (col = 0; col < disks_per_row; col++, dd++)
3277 dev_info(&h->pdev->dev,
3278 " M%02u: h=0x%04x xor=%u,%u\n",
3279 col, dd->ioaccel_handle,
3280 dd->xor_mult[0], dd->xor_mult[1]);
3281 }
3282 }
3283}
3284#else
3285static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3286 __attribute__((unused)) int rc,
3287 __attribute__((unused)) struct raid_map_data *map_buff)
3288{
3289}
3290#endif
3291
3292static int hpsa_get_raid_map(struct ctlr_info *h,
3293 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3294{
3295 int rc = 0;
3296 struct CommandList *c;
3297 struct ErrorInfo *ei;
3298
3299 c = cmd_alloc(h);
3300
3301 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3302 sizeof(this_device->raid_map), 0,
3303 scsi3addr, TYPE_CMD)) {
3304 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3305 cmd_free(h, c);
3306 return -1;
3307 }
3308 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3309 NO_TIMEOUT);
3310 if (rc)
3311 goto out;
3312 ei = c->err_info;
3313 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3314 hpsa_scsi_interpret_error(h, c);
3315 rc = -1;
3316 goto out;
3317 }
3318 cmd_free(h, c);
3319
3320
3321 if (le32_to_cpu(this_device->raid_map.structure_size) >
3322 sizeof(this_device->raid_map)) {
3323 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3324 rc = -1;
3325 }
3326 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3327 return rc;
3328out:
3329 cmd_free(h, c);
3330 return rc;
3331}
3332
3333static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3334 unsigned char scsi3addr[], u16 bmic_device_index,
3335 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3336{
3337 int rc = IO_OK;
3338 struct CommandList *c;
3339 struct ErrorInfo *ei;
3340
3341 c = cmd_alloc(h);
3342
3343 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3344 0, RAID_CTLR_LUNID, TYPE_CMD);
3345 if (rc)
3346 goto out;
3347
3348 c->Request.CDB[2] = bmic_device_index & 0xff;
3349 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3350
3351 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3352 NO_TIMEOUT);
3353 if (rc)
3354 goto out;
3355 ei = c->err_info;
3356 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3357 hpsa_scsi_interpret_error(h, c);
3358 rc = -1;
3359 }
3360out:
3361 cmd_free(h, c);
3362 return rc;
3363}
3364
3365static int hpsa_bmic_id_controller(struct ctlr_info *h,
3366 struct bmic_identify_controller *buf, size_t bufsize)
3367{
3368 int rc = IO_OK;
3369 struct CommandList *c;
3370 struct ErrorInfo *ei;
3371
3372 c = cmd_alloc(h);
3373
3374 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3375 0, RAID_CTLR_LUNID, TYPE_CMD);
3376 if (rc)
3377 goto out;
3378
3379 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3380 NO_TIMEOUT);
3381 if (rc)
3382 goto out;
3383 ei = c->err_info;
3384 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3385 hpsa_scsi_interpret_error(h, c);
3386 rc = -1;
3387 }
3388out:
3389 cmd_free(h, c);
3390 return rc;
3391}
3392
3393static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3394 unsigned char scsi3addr[], u16 bmic_device_index,
3395 struct bmic_identify_physical_device *buf, size_t bufsize)
3396{
3397 int rc = IO_OK;
3398 struct CommandList *c;
3399 struct ErrorInfo *ei;
3400
3401 c = cmd_alloc(h);
3402 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3403 0, RAID_CTLR_LUNID, TYPE_CMD);
3404 if (rc)
3405 goto out;
3406
3407 c->Request.CDB[2] = bmic_device_index & 0xff;
3408 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3409
3410 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3411 NO_TIMEOUT);
3412 ei = c->err_info;
3413 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3414 hpsa_scsi_interpret_error(h, c);
3415 rc = -1;
3416 }
3417out:
3418 cmd_free(h, c);
3419
3420 return rc;
3421}
3422
3423
3424 /*
3425  * Get enclosure information.  rlep/rle_index identify the BMIC
3426  * drive number to query; box and connector information is copied
3427  * into encl_dev on success.
3428  */
3429static void hpsa_get_enclosure_info(struct ctlr_info *h,
3430 unsigned char *scsi3addr,
3431 struct ReportExtendedLUNdata *rlep, int rle_index,
3432 struct hpsa_scsi_dev_t *encl_dev)
3433{
3434 int rc = -1;
3435 struct CommandList *c = NULL;
3436 struct ErrorInfo *ei = NULL;
3437 struct bmic_sense_storage_box_params *bssbp = NULL;
3438 struct bmic_identify_physical_device *id_phys = NULL;
3439 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3440 u16 bmic_device_index = 0;
3441
3442 encl_dev->eli =
3443 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3444
3445 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3446
3447 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3448 rc = IO_OK;
3449 goto out;
3450 }
3451
3452 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3453 rc = IO_OK;
3454 goto out;
3455 }
3456
3457 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3458 if (!bssbp)
3459 goto out;
3460
3461 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3462 if (!id_phys)
3463 goto out;
3464
3465 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3466 id_phys, sizeof(*id_phys));
3467 if (rc) {
3468 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3469 __func__, encl_dev->external, bmic_device_index);
3470 goto out;
3471 }
3472
3473 c = cmd_alloc(h);
3474
3475 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3476 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3477
3478 if (rc)
3479 goto out;
3480
3481 if (id_phys->phys_connector[1] == 'E')
3482 c->Request.CDB[5] = id_phys->box_index;
3483 else
3484 c->Request.CDB[5] = 0;
3485
3486 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3487 NO_TIMEOUT);
3488 if (rc)
3489 goto out;
3490
3491 ei = c->err_info;
3492 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3493 rc = -1;
3494 goto out;
3495 }
3496
3497 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3498 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3499 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3500
3501 rc = IO_OK;
3502out:
3503 kfree(bssbp);
3504 kfree(id_phys);
3505
3506 if (c)
3507 cmd_free(h, c);
3508
3509 if (rc != IO_OK)
3510 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3511 "Error, could not get enclosure information");
3512}
3513
3514static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3515 unsigned char *scsi3addr)
3516{
3517 struct ReportExtendedLUNdata *physdev;
3518 u32 nphysicals;
3519 u64 sa = 0;
3520 int i;
3521
3522 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3523 if (!physdev)
3524 return 0;
3525
3526 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3527 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3528 kfree(physdev);
3529 return 0;
3530 }
3531 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3532
3533 for (i = 0; i < nphysicals; i++)
3534 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3535 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3536 break;
3537 }
3538
3539 kfree(physdev);
3540
3541 return sa;
3542}
3543
3544static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3545 struct hpsa_scsi_dev_t *dev)
3546{
3547 int rc;
3548 u64 sa = 0;
3549
3550 if (is_hba_lunid(scsi3addr)) {
3551 struct bmic_sense_subsystem_info *ssi;
3552
3553 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3554 if (!ssi)
3555 return;
3556
3557 rc = hpsa_bmic_sense_subsystem_information(h,
3558 scsi3addr, 0, ssi, sizeof(*ssi));
3559 if (rc == 0) {
3560 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3561 h->sas_address = sa;
3562 }
3563
3564 kfree(ssi);
3565 } else
3566 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3567
3568 dev->sas_address = sa;
3569}
3570
3571static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3572 struct ReportExtendedLUNdata *physdev)
3573{
3574 u32 nphysicals;
3575 int i;
3576
3577 if (h->discovery_polling)
3578 return;
3579
3580 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3581
3582 for (i = 0; i < nphysicals; i++) {
3583 if (physdev->LUN[i].device_type ==
3584 BMIC_DEVICE_TYPE_CONTROLLER
3585 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3586 dev_info(&h->pdev->dev,
3587 "External controller present, activate discovery polling and disable rld caching\n");
3588 hpsa_disable_rld_caching(h);
3589 h->discovery_polling = 1;
3590 break;
3591 }
3592 }
3593}
3594
3595
3596static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3597 unsigned char scsi3addr[], u8 page)
3598{
3599 int rc;
3600 int i;
3601 int pages;
3602 unsigned char *buf, bufsize;
3603
3604 buf = kzalloc(256, GFP_KERNEL);
3605 if (!buf)
3606 return false;
3607
3608
3609 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3610 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3611 buf, HPSA_VPD_HEADER_SZ);
3612 if (rc != 0)
3613 goto exit_unsupported;
3614 pages = buf[3];
3615 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3616 bufsize = pages + HPSA_VPD_HEADER_SZ;
3617 else
3618 bufsize = 255;
3619
3620
3621 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3622 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3623 buf, bufsize);
3624 if (rc != 0)
3625 goto exit_unsupported;
3626
3627 pages = buf[3];
3628 for (i = 1; i <= pages; i++)
3629 if (buf[3 + i] == page)
3630 goto exit_supported;
3631exit_unsupported:
3632 kfree(buf);
3633 return false;
3634exit_supported:
3635 kfree(buf);
3636 return true;
3637}
3638
3639
3640 /*
3641  * Called during a scan operation.  Sets the ioaccel status on the
3642  * new device list being built, not on the existing device list;
3643  * the list used for I/O is updated later in adjust_hpsa_scsi_table.
3644  */
3645
3646static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3647 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3648{
3649 int rc;
3650 unsigned char *buf;
3651 u8 ioaccel_status;
3652
3653 this_device->offload_config = 0;
3654 this_device->offload_enabled = 0;
3655 this_device->offload_to_be_enabled = 0;
3656
3657 buf = kzalloc(64, GFP_KERNEL);
3658 if (!buf)
3659 return;
3660 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3661 goto out;
3662 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3663 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3664 if (rc != 0)
3665 goto out;
3666
3667#define IOACCEL_STATUS_BYTE 4
3668#define OFFLOAD_CONFIGURED_BIT 0x01
3669#define OFFLOAD_ENABLED_BIT 0x02
3670 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3671 this_device->offload_config =
3672 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3673 if (this_device->offload_config) {
3674 this_device->offload_to_be_enabled =
3675 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3676 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3677 this_device->offload_to_be_enabled = 0;
3678 }
3679
3680out:
3681 kfree(buf);
3682 return;
3683}
3684
3685
3686static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3687 unsigned char *device_id, int index, int buflen)
3688{
3689 int rc;
3690 unsigned char *buf;
3691
3692
3693 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3694 return 1;
3695
3696 buf = kzalloc(64, GFP_KERNEL);
3697 if (!buf)
3698 return -ENOMEM;
3699
3700 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3701 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3702 if (rc == 0) {
3703 if (buflen > 16)
3704 buflen = 16;
3705 memcpy(device_id, &buf[8], buflen);
3706 }
3707
3708 kfree(buf);
3709
3710 return rc;
3711}
3712
3713static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3714 void *buf, int bufsize,
3715 int extended_response)
3716{
3717 int rc = IO_OK;
3718 struct CommandList *c;
3719 unsigned char scsi3addr[8];
3720 struct ErrorInfo *ei;
3721
3722 c = cmd_alloc(h);
3723
3724
3725 memset(scsi3addr, 0, sizeof(scsi3addr));
3726 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3727 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3728 rc = -EAGAIN;
3729 goto out;
3730 }
3731 if (extended_response)
3732 c->Request.CDB[1] = extended_response;
3733 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3734 NO_TIMEOUT);
3735 if (rc)
3736 goto out;
3737 ei = c->err_info;
3738 if (ei->CommandStatus != 0 &&
3739 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3740 hpsa_scsi_interpret_error(h, c);
3741 rc = -EIO;
3742 } else {
3743 struct ReportLUNdata *rld = buf;
3744
3745 if (rld->extended_response_flag != extended_response) {
3746 if (!h->legacy_board) {
3747 dev_err(&h->pdev->dev,
3748 "report luns requested format %u, got %u\n",
3749 extended_response,
3750 rld->extended_response_flag);
3751 rc = -EINVAL;
3752 } else
3753 rc = -EOPNOTSUPP;
3754 }
3755 }
3756out:
3757 cmd_free(h, c);
3758 return rc;
3759}
3760
3761static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3762 struct ReportExtendedLUNdata *buf, int bufsize)
3763{
3764 int rc;
3765 struct ReportLUNdata *lbuf;
3766
3767 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3768 HPSA_REPORT_PHYS_EXTENDED);
3769 if (!rc || rc != -EOPNOTSUPP)
3770 return rc;
3771
3772
3773 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3774 if (!lbuf)
3775 return -ENOMEM;
3776
3777 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3778 if (!rc) {
3779 int i;
3780 u32 nphys;
3781
3782
3783 memcpy(buf, lbuf, 8);
3784 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3785 for (i = 0; i < nphys; i++)
3786 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3787 }
3788 kfree(lbuf);
3789 return rc;
3790}
3791
3792static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3793 struct ReportLUNdata *buf, int bufsize)
3794{
3795 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3796}
3797
3798static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3799 int bus, int target, int lun)
3800{
3801 device->bus = bus;
3802 device->target = target;
3803 device->lun = lun;
3804}
3805
3806
3807static int hpsa_get_volume_status(struct ctlr_info *h,
3808 unsigned char scsi3addr[])
3809{
3810 int rc;
3811 int status;
3812 int size;
3813 unsigned char *buf;
3814
3815 buf = kzalloc(64, GFP_KERNEL);
3816 if (!buf)
3817 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3818
3819
3820 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3821 goto exit_failed;
3822
3823
3824 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3825 buf, HPSA_VPD_HEADER_SZ);
3826 if (rc != 0)
3827 goto exit_failed;
3828 size = buf[3];
3829
3830
3831 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3832 buf, size + HPSA_VPD_HEADER_SZ);
3833 if (rc != 0)
3834 goto exit_failed;
3835 status = buf[4];
3836
3837 kfree(buf);
3838 return status;
3839exit_failed:
3840 kfree(buf);
3841 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3842}
3843
3844 /* Determine offline status of a volume.
3845  * Returns:
3846  *  HPSA_LV_OK if the volume is not offline,
3847  *  HPSA_VPD_LV_STATUS_UNSUPPORTED if the status cannot be determined,
3848  *  or one of the HPSA_LV_* NOT READY codes describing why the
3849  *  volume is to be kept offline.
3850  */
3851static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3852 unsigned char scsi3addr[])
3853{
3854 struct CommandList *c;
3855 unsigned char *sense;
3856 u8 sense_key, asc, ascq;
3857 int sense_len;
3858 int rc, ldstat = 0;
3859 u16 cmd_status;
3860 u8 scsi_status;
3861#define ASC_LUN_NOT_READY 0x04
3862#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3863#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3864
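/*
 * Send a TEST UNIT READY and capture the sense data, command status
 * and SCSI status for the checks below.
 */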
3865 c = cmd_alloc(h);
3866
3867 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3868 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3869 NO_TIMEOUT);
3870 if (rc) {
3871 cmd_free(h, c);
3872 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3873 }
3874 sense = c->err_info->SenseInfo;
3875 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3876 sense_len = sizeof(c->err_info->SenseInfo);
3877 else
3878 sense_len = c->err_info->SenseLen;
3879 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3880 cmd_status = c->err_info->CommandStatus;
3881 scsi_status = c->err_info->ScsiStatus;
3882 cmd_free(h, c);
3883
3884
3885 ldstat = hpsa_get_volume_status(h, scsi3addr);
3886
3887
3888 switch (ldstat) {
3889 case HPSA_LV_FAILED:
3890 case HPSA_LV_UNDERGOING_ERASE:
3891 case HPSA_LV_NOT_AVAILABLE:
3892 case HPSA_LV_UNDERGOING_RPI:
3893 case HPSA_LV_PENDING_RPI:
3894 case HPSA_LV_ENCRYPTED_NO_KEY:
3895 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3896 case HPSA_LV_UNDERGOING_ENCRYPTION:
3897 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3898 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3899 return ldstat;
3900 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3901
3902
3903
3904 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3905 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3906 return ldstat;
3907 break;
3908 default:
3909 break;
3910 }
3911 return HPSA_LV_OK;
3912}
3913
3914static int hpsa_update_device_info(struct ctlr_info *h,
3915 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3916 unsigned char *is_OBDR_device)
3917{
3918
3919#define OBDR_SIG_OFFSET 43
3920#define OBDR_TAPE_SIG "$DR-10"
3921#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3922#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3923
3924 unsigned char *inq_buff;
3925 unsigned char *obdr_sig;
3926 int rc = 0;
3927
3928 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3929 if (!inq_buff) {
3930 rc = -ENOMEM;
3931 goto bail_out;
3932 }
3933
3934
3935 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3936 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3937 dev_err(&h->pdev->dev,
3938 "%s: inquiry failed, device will be skipped.\n",
3939 __func__);
3940 rc = HPSA_INQUIRY_FAILED;
3941 goto bail_out;
3942 }
3943
3944 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3945 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3946
3947 this_device->devtype = (inq_buff[0] & 0x1f);
3948 memcpy(this_device->scsi3addr, scsi3addr, 8);
3949 memcpy(this_device->vendor, &inq_buff[8],
3950 sizeof(this_device->vendor));
3951 memcpy(this_device->model, &inq_buff[16],
3952 sizeof(this_device->model));
3953 this_device->rev = inq_buff[2];
3954 memset(this_device->device_id, 0,
3955 sizeof(this_device->device_id));
3956 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3957 sizeof(this_device->device_id)) < 0) {
3958 dev_err(&h->pdev->dev,
3959 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3960 h->ctlr, __func__,
3961 h->scsi_host->host_no,
3962 this_device->bus, this_device->target,
3963 this_device->lun,
3964 scsi_device_type(this_device->devtype),
3965 this_device->model);
3966 rc = HPSA_LV_FAILED;
3967 goto bail_out;
3968 }
3969
3970 if ((this_device->devtype == TYPE_DISK ||
3971 this_device->devtype == TYPE_ZBC) &&
3972 is_logical_dev_addr_mode(scsi3addr)) {
3973 unsigned char volume_offline;
3974
3975 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3976 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3977 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3978 volume_offline = hpsa_volume_offline(h, scsi3addr);
3979 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
3980 h->legacy_board) {
3981
3982
3983
3984 dev_info(&h->pdev->dev,
3985 "C0:T%d:L%d Volume status not available, assuming online.\n",
3986 this_device->target, this_device->lun);
3987 volume_offline = 0;
3988 }
3989 this_device->volume_offline = volume_offline;
3990 if (volume_offline == HPSA_LV_FAILED) {
3991 rc = HPSA_LV_FAILED;
3992 dev_err(&h->pdev->dev,
3993 "%s: LV failed, device will be skipped.\n",
3994 __func__);
3995 goto bail_out;
3996 }
3997 } else {
3998 this_device->raid_level = RAID_UNKNOWN;
3999 this_device->offload_config = 0;
4000 this_device->offload_enabled = 0;
4001 this_device->offload_to_be_enabled = 0;
4002 this_device->hba_ioaccel_enabled = 0;
4003 this_device->volume_offline = 0;
4004 this_device->queue_depth = h->nr_cmds;
4005 }
4006
4007 if (this_device->external)
4008 this_device->queue_depth = EXTERNAL_QD;
4009
4010 if (is_OBDR_device) {
4011
4012
4013
4014 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4015 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4016 strncmp(obdr_sig, OBDR_TAPE_SIG,
4017 OBDR_SIG_LEN) == 0);
4018 }
4019 kfree(inq_buff);
4020 return 0;
4021
4022bail_out:
4023 kfree(inq_buff);
4024 return rc;
4025}
4026
4027
4028
4029
4030
4031
4032
4033static void figure_bus_target_lun(struct ctlr_info *h,
4034 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4035{
4036 u32 lunid = get_unaligned_le32(lunaddrbytes);
4037
4038 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4039
4040 if (is_hba_lunid(lunaddrbytes)) {
4041 int bus = HPSA_HBA_BUS;
4042
4043 if (!device->rev)
4044 bus = HPSA_LEGACY_HBA_BUS;
4045 hpsa_set_bus_target_lun(device,
4046 bus, 0, lunid & 0x3fff);
4047 } else
4048
4049 hpsa_set_bus_target_lun(device,
4050 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4051 return;
4052 }
4053
4054 if (device->external) {
4055 hpsa_set_bus_target_lun(device,
4056 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4057 lunid & 0x00ff);
4058 return;
4059 }
4060 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4061 0, lunid & 0x3fff);
4062}
4063
4064static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4065 int i, int nphysicals, int nlocal_logicals)
4066{
4067
4068
4069
4070 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4071
4072 if (i == raid_ctlr_position)
4073 return 0;
4074
4075 if (i < logicals_start)
4076 return 0;
4077
4078
4079 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4080 return 0;
4081
4082 return 1;
4083}
4084
4085 /*
4086  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in
4087  * physdev and logdev; the number of LUNs in each is returned in
4088  * *nphysicals and *nlogicals.  Returns 0 on success, -1 otherwise.
4089  */
4090
4091static int hpsa_gather_lun_info(struct ctlr_info *h,
4092 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4093 struct ReportLUNdata *logdev, u32 *nlogicals)
4094{
4095 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4096 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4097 return -1;
4098 }
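/* LUNListLength is a byte count; extended physical entries are 24 bytes. */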
4099 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4100 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4101 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4102 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4103 *nphysicals = HPSA_MAX_PHYS_LUN;
4104 }
4105 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4106 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4107 return -1;
4108 }
4109 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4110
4111 if (*nlogicals > HPSA_MAX_LUN) {
4112 dev_warn(&h->pdev->dev,
4113 "maximum logical LUNs (%d) exceeded. "
4114 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4115 *nlogicals - HPSA_MAX_LUN);
4116 *nlogicals = HPSA_MAX_LUN;
4117 }
4118 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4119 dev_warn(&h->pdev->dev,
4120 "maximum logical + physical LUNs (%d) exceeded. "
4121 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4122 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4123 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4124 }
4125 return 0;
4126}
4127
4128static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4129 int i, int nphysicals, int nlogicals,
4130 struct ReportExtendedLUNdata *physdev_list,
4131 struct ReportLUNdata *logdev_list)
4132{
4133 /* Figure out where the LUN ID info is coming from, given index i,
4134  * the lists of physical and logical devices, and where in the list
4135  * the RAID controller is supposed to appear (first or last).
4136  */
4137
4138 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4139 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4140
4141 if (i == raid_ctlr_position)
4142 return RAID_CTLR_LUNID;
4143
4144 if (i < logicals_start)
4145 return &physdev_list->LUN[i -
4146 (raid_ctlr_position == 0)].lunid[0];
4147
4148 if (i < last_device)
4149 return &logdev_list->LUN[i - nphysicals -
4150 (raid_ctlr_position == 0)][0];
4151 BUG();
4152 return NULL;
4153}
4154
4155
4156static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4157 struct hpsa_scsi_dev_t *dev,
4158 struct ReportExtendedLUNdata *rlep, int rle_index,
4159 struct bmic_identify_physical_device *id_phys)
4160{
4161 int rc;
4162 struct ext_report_lun_entry *rle;
4163
4164 rle = &rlep->LUN[rle_index];
4165
4166 dev->ioaccel_handle = rle->ioaccel_handle;
4167 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4168 dev->hba_ioaccel_enabled = 1;
4169 memset(id_phys, 0, sizeof(*id_phys));
4170 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4171 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4172 sizeof(*id_phys));
4173 if (!rc)
4174
4175#define DRIVE_CMDS_RESERVED_FOR_FW 2
4176#define DRIVE_QUEUE_DEPTH 7
4177 dev->queue_depth =
4178 le16_to_cpu(id_phys->current_queue_depth_limit) -
4179 DRIVE_CMDS_RESERVED_FOR_FW;
4180 else
4181 dev->queue_depth = DRIVE_QUEUE_DEPTH;
4182}
4183
4184static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4185 struct ReportExtendedLUNdata *rlep, int rle_index,
4186 struct bmic_identify_physical_device *id_phys)
4187{
4188 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4189
4190 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4191 this_device->hba_ioaccel_enabled = 1;
4192
4193 memcpy(&this_device->active_path_index,
4194 &id_phys->active_path_number,
4195 sizeof(this_device->active_path_index));
4196 memcpy(&this_device->path_map,
4197 &id_phys->redundant_path_present_map,
4198 sizeof(this_device->path_map));
4199 memcpy(&this_device->box,
4200 &id_phys->alternate_paths_phys_box_on_port,
4201 sizeof(this_device->box));
4202 memcpy(&this_device->phys_connector,
4203 &id_phys->alternate_paths_phys_connector,
4204 sizeof(this_device->phys_connector));
4205 memcpy(&this_device->bay,
4206 &id_phys->phys_bay_in_box,
4207 sizeof(this_device->bay));
4208}
4209
4210
4211static int hpsa_set_local_logical_count(struct ctlr_info *h,
4212 struct bmic_identify_controller *id_ctlr,
4213 u32 *nlocals)
4214{
4215 int rc;
4216
4217 if (!id_ctlr) {
4218 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4219 __func__);
4220 return -ENOMEM;
4221 }
4222 memset(id_ctlr, 0, sizeof(*id_ctlr));
4223 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4224 if (!rc)
4225 if (id_ctlr->configured_logical_drive_count < 255)
4226 *nlocals = id_ctlr->configured_logical_drive_count;
4227 else
4228 *nlocals = le16_to_cpu(
4229 id_ctlr->extended_logical_unit_count);
4230 else
4231 *nlocals = -1;
4232 return rc;
4233}
4234
4235static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4236{
4237 struct bmic_identify_physical_device *id_phys;
4238 bool is_spare = false;
4239 int rc;
4240
4241 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4242 if (!id_phys)
4243 return false;
4244
4245 rc = hpsa_bmic_id_physical_device(h,
4246 lunaddrbytes,
4247 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4248 id_phys, sizeof(*id_phys));
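	/* bit 6 of more_flags indicates the drive is configured as a spare */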
4249 if (rc == 0)
4250 is_spare = (id_phys->more_flags >> 6) & 0x01;
4251
4252 kfree(id_phys);
4253 return is_spare;
4254}
4255
4256#define RPL_DEV_FLAG_NON_DISK 0x1
4257#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4258#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4259
4260#define BMIC_DEVICE_TYPE_ENCLOSURE 6
4261
4262static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4263 struct ext_report_lun_entry *rle)
4264{
4265 u8 device_flags;
4266 u8 device_type;
4267
4268 if (!MASKED_DEVICE(lunaddrbytes))
4269 return false;
4270
4271 device_flags = rle->device_flags;
4272 device_type = rle->device_type;
4273
4274 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4275 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4276 return false;
4277 return true;
4278 }
4279
4280 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4281 return false;
4282
4283 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4284 return false;
4285
	/*
	 * Spares may be spun down; we do not want to send an Inquiry to a
	 * RAID-set spare drive, as that would cause it to spin up.  That is a
	 * performance hit, because I/O to the RAID device stops while the
	 * spin-up occurs.
	 */
4294 if (hpsa_is_disk_spare(h, lunaddrbytes))
4295 return true;
4296
4297 return false;
4298}
4299
4300static void hpsa_update_scsi_devices(struct ctlr_info *h)
4301{
	/*
	 * The idea here is that we could get notified that some devices have
	 * changed, so we do report-physical-LUNs and report-logical-LUNs
	 * commands and adjust our list of devices accordingly.
	 *
	 * The scsi3addr of a device won't change so long as the adapter is
	 * not reset.  That means we can rescan and tell which devices we
	 * already know about, vs. new devices, vs. disappearing devices.
	 */
4312 struct ReportExtendedLUNdata *physdev_list = NULL;
4313 struct ReportLUNdata *logdev_list = NULL;
4314 struct bmic_identify_physical_device *id_phys = NULL;
4315 struct bmic_identify_controller *id_ctlr = NULL;
4316 u32 nphysicals = 0;
4317 u32 nlogicals = 0;
4318 u32 nlocal_logicals = 0;
4319 u32 ndev_allocated = 0;
4320 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4321 int ncurrent = 0;
4322 int i, n_ext_target_devs, ndevs_to_allocate;
4323 int raid_ctlr_position;
4324 bool physical_device;
4325 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4326
4327 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4328 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4329 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4330 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4331 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4332 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4333
4334 if (!currentsd || !physdev_list || !logdev_list ||
4335 !tmpdevice || !id_phys || !id_ctlr) {
4336 dev_err(&h->pdev->dev, "out of memory\n");
4337 goto out;
4338 }
4339 memset(lunzerobits, 0, sizeof(lunzerobits));
4340
4341 h->drv_req_rescan = 0;
4342
4343 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4344 logdev_list, &nlogicals)) {
4345 h->drv_req_rescan = 1;
4346 goto out;
4347 }
4348
4349
4350 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4351 dev_warn(&h->pdev->dev,
4352 "%s: Can't determine number of local logical devices.\n",
4353 __func__);
4354 }

	/*
	 * We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
4360 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4361
4362 hpsa_ext_ctrl_present(h, physdev_list);
4363
4364
4365 for (i = 0; i < ndevs_to_allocate; i++) {
4366 if (i >= HPSA_MAX_DEVICES) {
4367 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4368 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4369 ndevs_to_allocate - HPSA_MAX_DEVICES);
4370 break;
4371 }
4372
4373 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4374 if (!currentsd[i]) {
4375 h->drv_req_rescan = 1;
4376 goto out;
4377 }
4378 ndev_allocated++;
4379 }
4380
4381 if (is_scsi_rev_5(h))
4382 raid_ctlr_position = 0;
4383 else
4384 raid_ctlr_position = nphysicals + nlogicals;
4385
4386
4387 n_ext_target_devs = 0;
4388 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4389 u8 *lunaddrbytes, is_OBDR = 0;
4390 int rc = 0;
4391 int phys_dev_index = i - (raid_ctlr_position == 0);
4392 bool skip_device = false;
4393
4394 memset(tmpdevice, 0, sizeof(*tmpdevice));
4395
4396 physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from. */
4399 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4400 i, nphysicals, nlogicals, physdev_list, logdev_list);
4401
4402
4403 tmpdevice->external =
4404 figure_external_status(h, raid_ctlr_position, i,
4405 nphysicals, nlocal_logicals);

		/*
		 * Skip masked physical devices that should not be exposed,
		 * such as spares.
		 */
4410 if (!tmpdevice->external && physical_device) {
4411 skip_device = hpsa_skip_device(h, lunaddrbytes,
4412 &physdev_list->LUN[phys_dev_index]);
4413 if (skip_device)
4414 continue;
4415 }
4416
4417
4418 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4419 &is_OBDR);
4420 if (rc == -ENOMEM) {
4421 dev_warn(&h->pdev->dev,
4422 "Out of memory, rescan deferred.\n");
4423 h->drv_req_rescan = 1;
4424 goto out;
4425 }
4426 if (rc) {
4427 h->drv_req_rescan = 1;
4428 continue;
4429 }
4430
4431 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4432 this_device = currentsd[ncurrent];
4433
4434 *this_device = *tmpdevice;
4435 this_device->physical_device = physical_device;
4436
		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
4441 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4442 this_device->expose_device = 0;
4443 else
4444 this_device->expose_device = 1;
4445
		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
4450 if (this_device->physical_device && this_device->expose_device)
4451 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4452
4453 switch (this_device->devtype) {
4454 case TYPE_ROM:
			/*
			 * We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drives
			 * which temporarily pretend to be CD-ROM drives.  So
			 * we check that the device is really an OBDR tape
			 * device by looking for the OBDR signature in the
			 * inquiry data.
			 */
4462 if (is_OBDR)
4463 ncurrent++;
4464 break;
4465 case TYPE_DISK:
4466 case TYPE_ZBC:
4467 if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use the RAID mapper in HBA mode. */
4470 this_device->offload_enabled = 0;
4471 hpsa_get_ioaccel_drive_info(h, this_device,
4472 physdev_list, phys_dev_index, id_phys);
4473 hpsa_get_path_info(this_device,
4474 physdev_list, phys_dev_index, id_phys);
4475 }
4476 ncurrent++;
4477 break;
4478 case TYPE_TAPE:
4479 case TYPE_MEDIUM_CHANGER:
4480 ncurrent++;
4481 break;
4482 case TYPE_ENCLOSURE:
4483 if (!this_device->external)
4484 hpsa_get_enclosure_info(h, lunaddrbytes,
4485 physdev_list, phys_dev_index,
4486 this_device);
4487 ncurrent++;
4488 break;
4489 case TYPE_RAID:
			/*
			 * Only expose the local RAID controller itself; any
			 * other RAID-class device (e.g. an external RAID
			 * controller) is not presented here.
			 */
4495 if (!is_hba_lunid(lunaddrbytes))
4496 break;
4497 ncurrent++;
4498 break;
4499 default:
4500 break;
4501 }
4502 if (ncurrent >= HPSA_MAX_DEVICES)
4503 break;
4504 }
4505
4506 if (h->sas_host == NULL) {
4507 int rc = 0;
4508
4509 rc = hpsa_add_sas_host(h);
4510 if (rc) {
4511 dev_warn(&h->pdev->dev,
4512 "Could not add sas host %d\n", rc);
4513 goto out;
4514 }
4515 }
4516
4517 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4518out:
4519 kfree(tmpdevice);
4520 for (i = 0; i < ndev_allocated; i++)
4521 kfree(currentsd[i]);
4522 kfree(currentsd);
4523 kfree(physdev_list);
4524 kfree(logdev_list);
4525 kfree(id_ctlr);
4526 kfree(id_phys);
4527}
4528
4529static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4530 struct scatterlist *sg)
4531{
4532 u64 addr64 = (u64) sg_dma_address(sg);
4533 unsigned int len = sg_dma_len(sg);
4534
4535 desc->Addr = cpu_to_le64(addr64);
4536 desc->Len = cpu_to_le32(len);
4537 desc->Ext = 0;
4538}
4539
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), does the DMA mapping,
 * and fills in the scatter gather entries of the hpsa command, cp.
 */
4545static int hpsa_scatter_gather(struct ctlr_info *h,
4546 struct CommandList *cp,
4547 struct scsi_cmnd *cmd)
4548{
4549 struct scatterlist *sg;
4550 int use_sg, i, sg_limit, chained, last_sg;
4551 struct SGDescriptor *curr_sg;
4552
4553 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4554
4555 use_sg = scsi_dma_map(cmd);
4556 if (use_sg < 0)
4557 return use_sg;
4558
4559 if (!use_sg)
4560 goto sglist_finished;
4561
	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the single list.
	 */
4569 curr_sg = cp->SG;
4570 chained = use_sg > h->max_cmd_sg_entries;
4571 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4572 last_sg = scsi_sg_count(cmd) - 1;
4573 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4574 hpsa_set_sg_descriptor(curr_sg, sg);
4575 curr_sg++;
4576 }
4577
4578 if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
4585 curr_sg = h->cmd_sg_list[cp->cmdindex];
4586 sg_limit = use_sg - sg_limit;
4587 for_each_sg(sg, sg, sg_limit, i) {
4588 hpsa_set_sg_descriptor(curr_sg, sg);
4589 curr_sg++;
4590 }
4591 }
4592
	/* Back the pointer up to the last entry and mark it as "last". */
4594 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4595
4596 if (use_sg + chained > h->maxSG)
4597 h->maxSG = use_sg + chained;
4598
4599 if (chained) {
4600 cp->Header.SGList = h->max_cmd_sg_entries;
4601 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4602 if (hpsa_map_sg_chain_block(h, cp)) {
4603 scsi_dma_unmap(cmd);
4604 return -1;
4605 }
4606 return 0;
4607 }
4608
4609sglist_finished:
4610
4611 cp->Header.SGList = (u8) use_sg;
4612 cp->Header.SGTotal = cpu_to_le16(use_sg);
4613 return 0;
4614}
4615
4616static inline void warn_zero_length_transfer(struct ctlr_info *h,
4617 u8 *cdb, int cdb_len,
4618 const char *func)
4619{
4620 dev_warn(&h->pdev->dev,
4621 "%s: Blocking zero-length request: CDB:%*phN\n",
4622 func, cdb_len, cdb);
4623}
4624
4625#define IO_ACCEL_INELIGIBLE 1
4626
4627static bool is_zero_length_transfer(u8 *cdb)
4628{
4629 u32 block_cnt;
4630
4631
4632 switch (cdb[0]) {
4633 case READ_10:
4634 case WRITE_10:
4635 case VERIFY:
4636 case WRITE_VERIFY:
4637 block_cnt = get_unaligned_be16(&cdb[7]);
4638 break;
4639 case READ_12:
4640 case WRITE_12:
4641 case VERIFY_12:
4642 case WRITE_VERIFY_12:
4643 block_cnt = get_unaligned_be32(&cdb[6]);
4644 break;
4645 case READ_16:
4646 case WRITE_16:
4647 case VERIFY_16:
4648 block_cnt = get_unaligned_be32(&cdb[10]);
4649 break;
4650 default:
4651 return false;
4652 }
4653
4654 return block_cnt == 0;
4655}
4656
4657static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4658{
4659 int is_write = 0;
4660 u32 block;
4661 u32 block_cnt;
4662
	/* Perform some CDB fixups if needed, using 10-byte reads/writes only. */
4664 switch (cdb[0]) {
4665 case WRITE_6:
4666 case WRITE_12:
4667 is_write = 1;
		/* fall through */
4669 case READ_6:
4670 case READ_12:
4671 if (*cdb_len == 6) {
4672 block = (((cdb[1] & 0x1F) << 16) |
4673 (cdb[2] << 8) |
4674 cdb[3]);
4675 block_cnt = cdb[4];
4676 if (block_cnt == 0)
4677 block_cnt = 256;
4678 } else {
4679 BUG_ON(*cdb_len != 12);
4680 block = get_unaligned_be32(&cdb[2]);
4681 block_cnt = get_unaligned_be32(&cdb[6]);
4682 }
4683 if (block_cnt > 0xffff)
4684 return IO_ACCEL_INELIGIBLE;
4685
4686 cdb[0] = is_write ? WRITE_10 : READ_10;
4687 cdb[1] = 0;
4688 cdb[2] = (u8) (block >> 24);
4689 cdb[3] = (u8) (block >> 16);
4690 cdb[4] = (u8) (block >> 8);
4691 cdb[5] = (u8) (block);
4692 cdb[6] = 0;
4693 cdb[7] = (u8) (block_cnt >> 8);
4694 cdb[8] = (u8) (block_cnt);
4695 cdb[9] = 0;
4696 *cdb_len = 10;
4697 break;
4698 }
4699 return 0;
4700}
4701
4702static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4703 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4704 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4705{
4706 struct scsi_cmnd *cmd = c->scsi_cmd;
4707 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4708 unsigned int len;
4709 unsigned int total_len = 0;
4710 struct scatterlist *sg;
4711 u64 addr64;
4712 int use_sg, i;
4713 struct SGDescriptor *curr_sg;
4714 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4715
	/* ioaccel1 has no SG chaining support, so fall back if the list is too long. */
4717 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4718 atomic_dec(&phys_disk->ioaccel_cmds_out);
4719 return IO_ACCEL_INELIGIBLE;
4720 }
4721
4722 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4723
4724 if (is_zero_length_transfer(cdb)) {
4725 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4726 atomic_dec(&phys_disk->ioaccel_cmds_out);
4727 return IO_ACCEL_INELIGIBLE;
4728 }
4729
4730 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4731 atomic_dec(&phys_disk->ioaccel_cmds_out);
4732 return IO_ACCEL_INELIGIBLE;
4733 }
4734
4735 c->cmd_type = CMD_IOACCEL1;
4736
4737
4738 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4739 (c->cmdindex * sizeof(*cp));
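	/* ioaccel1 command blocks must be 128-byte aligned (low 7 bits clear) */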
4740 BUG_ON(c->busaddr & 0x0000007F);
4741
4742 use_sg = scsi_dma_map(cmd);
4743 if (use_sg < 0) {
4744 atomic_dec(&phys_disk->ioaccel_cmds_out);
4745 return use_sg;
4746 }
4747
4748 if (use_sg) {
4749 curr_sg = cp->SG;
4750 scsi_for_each_sg(cmd, sg, use_sg, i) {
4751 addr64 = (u64) sg_dma_address(sg);
4752 len = sg_dma_len(sg);
4753 total_len += len;
4754 curr_sg->Addr = cpu_to_le64(addr64);
4755 curr_sg->Len = cpu_to_le32(len);
4756 curr_sg->Ext = cpu_to_le32(0);
4757 curr_sg++;
4758 }
4759 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4760
4761 switch (cmd->sc_data_direction) {
4762 case DMA_TO_DEVICE:
4763 control |= IOACCEL1_CONTROL_DATA_OUT;
4764 break;
4765 case DMA_FROM_DEVICE:
4766 control |= IOACCEL1_CONTROL_DATA_IN;
4767 break;
4768 case DMA_NONE:
4769 control |= IOACCEL1_CONTROL_NODATAXFER;
4770 break;
4771 default:
4772 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4773 cmd->sc_data_direction);
4774 BUG();
4775 break;
4776 }
4777 } else {
4778 control |= IOACCEL1_CONTROL_NODATAXFER;
4779 }
4780
4781 c->Header.SGList = use_sg;
4782
4783 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4784 cp->transfer_len = cpu_to_le32(total_len);
4785 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4786 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4787 cp->control = cpu_to_le32(control);
4788 memcpy(cp->CDB, cdb, cdb_len);
4789 memcpy(cp->CISS_LUN, scsi3addr, 8);
4790
4791 enqueue_cmd_and_start_io(h, c);
4792 return 0;
4793}
4794
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
4799static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4800 struct CommandList *c)
4801{
4802 struct scsi_cmnd *cmd = c->scsi_cmd;
4803 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4804
4805 if (!dev)
4806 return -1;
4807
4808 c->phys_disk = dev;
4809
4810 if (dev->in_reset)
4811 return -1;
4812
4813 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4814 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4815}
4816
/*
 * Set encryption parameters for the ioaccel2 request
 */
4820static void set_encrypt_ioaccel2(struct ctlr_info *h,
4821 struct CommandList *c, struct io_accel2_cmd *cp)
4822{
4823 struct scsi_cmnd *cmd = c->scsi_cmd;
4824 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4825 struct raid_map_data *map = &dev->raid_map;
4826 u64 first_block;
4827
4828
4829 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4830 return;
4831
4832 cp->dekindex = map->dekindex;
4833
4834
4835 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is the LBA.
	 * For other block sizes, the tweak is (LBA * block size) / 512.
	 */
4841 switch (cmd->cmnd[0]) {
4842
4843 case READ_6:
4844 case WRITE_6:
4845 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4846 (cmd->cmnd[2] << 8) |
4847 cmd->cmnd[3]);
4848 break;
4849 case WRITE_10:
4850 case READ_10:
4851
4852 case WRITE_12:
4853 case READ_12:
4854 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4855 break;
4856 case WRITE_16:
4857 case READ_16:
4858 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4859 break;
4860 default:
4861 dev_err(&h->pdev->dev,
4862 "ERROR: %s: size (0x%x) not supported for encryption\n",
4863 __func__, cmd->cmnd[0]);
4864 BUG();
4865 break;
4866 }
4867
4868 if (le32_to_cpu(map->volume_blk_size) != 512)
4869 first_block = first_block *
4870 le32_to_cpu(map->volume_blk_size)/512;
4871
4872 cp->tweak_lower = cpu_to_le32(first_block);
4873 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4874}
4875
4876static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4877 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4878 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4879{
4880 struct scsi_cmnd *cmd = c->scsi_cmd;
4881 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4882 struct ioaccel2_sg_element *curr_sg;
4883 int use_sg, i;
4884 struct scatterlist *sg;
4885 u64 addr64;
4886 u32 len;
4887 u32 total_len = 0;
4888
4889 if (!cmd->device)
4890 return -1;
4891
4892 if (!cmd->device->hostdata)
4893 return -1;
4894
4895 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4896
4897 if (is_zero_length_transfer(cdb)) {
4898 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4899 atomic_dec(&phys_disk->ioaccel_cmds_out);
4900 return IO_ACCEL_INELIGIBLE;
4901 }
4902
4903 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4904 atomic_dec(&phys_disk->ioaccel_cmds_out);
4905 return IO_ACCEL_INELIGIBLE;
4906 }
4907
4908 c->cmd_type = CMD_IOACCEL2;
4909
4910 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4911 (c->cmdindex * sizeof(*cp));
4912 BUG_ON(c->busaddr & 0x0000007F);
4913
4914 memset(cp, 0, sizeof(*cp));
4915 cp->IU_type = IOACCEL2_IU_TYPE;
4916
4917 use_sg = scsi_dma_map(cmd);
4918 if (use_sg < 0) {
4919 atomic_dec(&phys_disk->ioaccel_cmds_out);
4920 return use_sg;
4921 }
4922
4923 if (use_sg) {
4924 curr_sg = cp->sg;
4925 if (use_sg > h->ioaccel_maxsg) {
4926 addr64 = le64_to_cpu(
4927 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4928 curr_sg->address = cpu_to_le64(addr64);
4929 curr_sg->length = 0;
4930 curr_sg->reserved[0] = 0;
4931 curr_sg->reserved[1] = 0;
4932 curr_sg->reserved[2] = 0;
4933 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4934
4935 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4936 }
4937 scsi_for_each_sg(cmd, sg, use_sg, i) {
4938 addr64 = (u64) sg_dma_address(sg);
4939 len = sg_dma_len(sg);
4940 total_len += len;
4941 curr_sg->address = cpu_to_le64(addr64);
4942 curr_sg->length = cpu_to_le32(len);
4943 curr_sg->reserved[0] = 0;
4944 curr_sg->reserved[1] = 0;
4945 curr_sg->reserved[2] = 0;
4946 curr_sg->chain_indicator = 0;
4947 curr_sg++;
4948 }
4949
		/* Mark the final s/g element as the last one. */
4953 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4954
4955 switch (cmd->sc_data_direction) {
4956 case DMA_TO_DEVICE:
4957 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4958 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4959 break;
4960 case DMA_FROM_DEVICE:
4961 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4962 cp->direction |= IOACCEL2_DIR_DATA_IN;
4963 break;
4964 case DMA_NONE:
4965 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4966 cp->direction |= IOACCEL2_DIR_NO_DATA;
4967 break;
4968 default:
4969 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4970 cmd->sc_data_direction);
4971 BUG();
4972 break;
4973 }
4974 } else {
4975 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4976 cp->direction |= IOACCEL2_DIR_NO_DATA;
4977 }
4978
4979
4980 set_encrypt_ioaccel2(h, c, cp);
4981
4982 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4983 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4984 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4985
4986 cp->data_len = cpu_to_le32(total_len);
4987 cp->err_ptr = cpu_to_le64(c->busaddr +
4988 offsetof(struct io_accel2_cmd, error_data));
4989 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4990
4991
4992 if (use_sg > h->ioaccel_maxsg) {
4993 cp->sg_count = 1;
4994 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4995 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4996 atomic_dec(&phys_disk->ioaccel_cmds_out);
4997 scsi_dma_unmap(cmd);
4998 return -1;
4999 }
5000 } else
5001 cp->sg_count = (u8) use_sg;
5002
5003 if (phys_disk->in_reset) {
5004 cmd->result = DID_RESET << 16;
5005 return -1;
5006 }
5007
5008 enqueue_cmd_and_start_io(h, c);
5009 return 0;
5010}
5011
/*
 * Queue a command down the I/O accelerator path, selecting ioaccel mode 1
 * or 2 according to the controller's transport method.
 */
5015static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5016 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5017 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5018{
5019 if (!c->scsi_cmd->device)
5020 return -1;
5021
5022 if (!c->scsi_cmd->device->hostdata)
5023 return -1;
5024
5025 if (phys_disk->in_reset)
5026 return -1;
5027
	/* Try to honor the device's queue depth. */
5029 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5030 phys_disk->queue_depth) {
5031 atomic_dec(&phys_disk->ioaccel_cmds_out);
5032 return IO_ACCEL_INELIGIBLE;
5033 }
5034 if (h->transMethod & CFGTBL_Trans_io_accel1)
5035 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5036 cdb, cdb_len, scsi3addr,
5037 phys_disk);
5038 else
5039 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5040 cdb, cdb_len, scsi3addr,
5041 phys_disk);
5042}
5043
5044static void raid_map_helper(struct raid_map_data *map,
5045 int offload_to_mirror, u32 *map_index, u32 *current_group)
5046{
5047 if (offload_to_mirror == 0) {
		/* use the physical disk in the first mirrored group */
5049 *map_index %= le16_to_cpu(map->data_disks_per_row);
5050 return;
5051 }
5052 do {
		/* determine which mirror group *map_index indicates */
5054 *current_group = *map_index /
5055 le16_to_cpu(map->data_disks_per_row);
5056 if (offload_to_mirror == *current_group)
5057 continue;
5058 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from the next group */
5060 *map_index += le16_to_cpu(map->data_disks_per_row);
5061 (*current_group)++;
5062 } else {
			/* select map index from the first group */
5064 *map_index %= le16_to_cpu(map->data_disks_per_row);
5065 *current_group = 0;
5066 }
5067 } while (offload_to_mirror != *current_group);
5068}
5069
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
5073static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5074 struct CommandList *c)
5075{
5076 struct scsi_cmnd *cmd = c->scsi_cmd;
5077 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5078 struct raid_map_data *map = &dev->raid_map;
5079 struct raid_map_disk_data *dd = &map->data[0];
5080 int is_write = 0;
5081 u32 map_index;
5082 u64 first_block, last_block;
5083 u32 block_cnt;
5084 u32 blocks_per_row;
5085 u64 first_row, last_row;
5086 u32 first_row_offset, last_row_offset;
5087 u32 first_column, last_column;
5088 u64 r0_first_row, r0_last_row;
5089 u32 r5or6_blocks_per_row;
5090 u64 r5or6_first_row, r5or6_last_row;
5091 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5092 u32 r5or6_first_column, r5or6_last_column;
5093 u32 total_disks_per_row;
5094 u32 stripesize;
5095 u32 first_group, last_group, current_group;
5096 u32 map_row;
5097 u32 disk_handle;
5098 u64 disk_block;
5099 u32 disk_block_cnt;
5100 u8 cdb[16];
5101 u8 cdb_len;
5102 u16 strip_size;
5103#if BITS_PER_LONG == 32
5104 u64 tmpdiv;
5105#endif
5106 int offload_to_mirror;
5107
5108 if (!dev)
5109 return -1;
5110
5111 if (dev->in_reset)
5112 return -1;
5113
	/* check for a valid opcode, get the LBA and block count */
5115 switch (cmd->cmnd[0]) {
5116 case WRITE_6:
5117 is_write = 1;
		/* fall through */
5119 case READ_6:
5120 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5121 (cmd->cmnd[2] << 8) |
5122 cmd->cmnd[3]);
5123 block_cnt = cmd->cmnd[4];
5124 if (block_cnt == 0)
5125 block_cnt = 256;
5126 break;
5127 case WRITE_10:
5128 is_write = 1;
		/* fall through */
5130 case READ_10:
5131 first_block =
5132 (((u64) cmd->cmnd[2]) << 24) |
5133 (((u64) cmd->cmnd[3]) << 16) |
5134 (((u64) cmd->cmnd[4]) << 8) |
5135 cmd->cmnd[5];
5136 block_cnt =
5137 (((u32) cmd->cmnd[7]) << 8) |
5138 cmd->cmnd[8];
5139 break;
5140 case WRITE_12:
5141 is_write = 1;
		/* fall through */
5143 case READ_12:
5144 first_block =
5145 (((u64) cmd->cmnd[2]) << 24) |
5146 (((u64) cmd->cmnd[3]) << 16) |
5147 (((u64) cmd->cmnd[4]) << 8) |
5148 cmd->cmnd[5];
5149 block_cnt =
5150 (((u32) cmd->cmnd[6]) << 24) |
5151 (((u32) cmd->cmnd[7]) << 16) |
5152 (((u32) cmd->cmnd[8]) << 8) |
5153 cmd->cmnd[9];
5154 break;
5155 case WRITE_16:
5156 is_write = 1;
		/* fall through */
5158 case READ_16:
5159 first_block =
5160 (((u64) cmd->cmnd[2]) << 56) |
5161 (((u64) cmd->cmnd[3]) << 48) |
5162 (((u64) cmd->cmnd[4]) << 40) |
5163 (((u64) cmd->cmnd[5]) << 32) |
5164 (((u64) cmd->cmnd[6]) << 24) |
5165 (((u64) cmd->cmnd[7]) << 16) |
5166 (((u64) cmd->cmnd[8]) << 8) |
5167 cmd->cmnd[9];
5168 block_cnt =
5169 (((u32) cmd->cmnd[10]) << 24) |
5170 (((u32) cmd->cmnd[11]) << 16) |
5171 (((u32) cmd->cmnd[12]) << 8) |
5172 cmd->cmnd[13];
5173 break;
5174 default:
5175 return IO_ACCEL_INELIGIBLE;
5176 }
5177 last_block = first_block + block_cnt - 1;
5178
	/* Writes are only offloaded for RAID 0 volumes. */
5180 if (is_write && dev->raid_level != 0)
5181 return IO_ACCEL_INELIGIBLE;
5182
	/* check for an invalid block or wraparound */
5184 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5185 last_block < first_block)
5186 return IO_ACCEL_INELIGIBLE;
5187
5188
5189 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5190 le16_to_cpu(map->strip_size);
5191 strip_size = le16_to_cpu(map->strip_size);
5192#if BITS_PER_LONG == 32
5193 tmpdiv = first_block;
5194 (void) do_div(tmpdiv, blocks_per_row);
5195 first_row = tmpdiv;
5196 tmpdiv = last_block;
5197 (void) do_div(tmpdiv, blocks_per_row);
5198 last_row = tmpdiv;
5199 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5200 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5201 tmpdiv = first_row_offset;
5202 (void) do_div(tmpdiv, strip_size);
5203 first_column = tmpdiv;
5204 tmpdiv = last_row_offset;
5205 (void) do_div(tmpdiv, strip_size);
5206 last_column = tmpdiv;
5207#else
5208 first_row = first_block / blocks_per_row;
5209 last_row = last_block / blocks_per_row;
5210 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5211 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5212 first_column = first_row_offset / strip_size;
5213 last_column = last_row_offset / strip_size;
5214#endif

	/* If the transfer crosses a strip boundary, the RAID map can't be used. */
5217 if ((first_row != last_row) || (first_column != last_column))
5218 return IO_ACCEL_INELIGIBLE;
5219
5220
5221 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5222 le16_to_cpu(map->metadata_disks_per_row);
5223 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5224 le16_to_cpu(map->row_cnt);
5225 map_index = (map_row * total_disks_per_row) + first_column;
5226
5227 switch (dev->raid_level) {
5228 case HPSA_RAID_0:
5229 break;
5230 case HPSA_RAID_1:
		/*
		 * Handles load balancing across RAID 1 members
		 * (two mirror groups); alternate between the two groups
		 * on successive requests.
		 */
5235 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5236 if (dev->offload_to_mirror)
5237 map_index += le16_to_cpu(map->data_disks_per_row);
5238 dev->offload_to_mirror = !dev->offload_to_mirror;
5239 break;
5240 case HPSA_RAID_ADM:
		/*
		 * Handles N-way mirrors (R1-ADM), which use three
		 * mirror groups.
		 */
5244 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5245
5246 offload_to_mirror = dev->offload_to_mirror;
5247 raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set the mirror group to use next time */
5250 offload_to_mirror =
5251 (offload_to_mirror >=
5252 le16_to_cpu(map->layout_map_count) - 1)
5253 ? 0 : offload_to_mirror + 1;
5254 dev->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of dev->offload_to_mirror within this
		 * function, since multiple threads might simultaneously
		 * increment it beyond the range of map->layout_map_count - 1.
		 */
5259 break;
5260 case HPSA_RAID_5:
5261 case HPSA_RAID_6:
5262 if (le16_to_cpu(map->layout_map_count) <= 1)
5263 break;

		/* Verify the first and last block are in the same RAID group. */
5266 r5or6_blocks_per_row =
5267 le16_to_cpu(map->strip_size) *
5268 le16_to_cpu(map->data_disks_per_row);
5269 BUG_ON(r5or6_blocks_per_row == 0);
5270 stripesize = r5or6_blocks_per_row *
5271 le16_to_cpu(map->layout_map_count);
5272#if BITS_PER_LONG == 32
5273 tmpdiv = first_block;
5274 first_group = do_div(tmpdiv, stripesize);
5275 tmpdiv = first_group;
5276 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5277 first_group = tmpdiv;
5278 tmpdiv = last_block;
5279 last_group = do_div(tmpdiv, stripesize);
5280 tmpdiv = last_group;
5281 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5282 last_group = tmpdiv;
5283#else
5284 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5285 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5286#endif
5287 if (first_group != last_group)
5288 return IO_ACCEL_INELIGIBLE;

		/* Verify the request lies within a single row of RAID 5/6. */
5291#if BITS_PER_LONG == 32
5292 tmpdiv = first_block;
5293 (void) do_div(tmpdiv, stripesize);
5294 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5295 tmpdiv = last_block;
5296 (void) do_div(tmpdiv, stripesize);
5297 r5or6_last_row = r0_last_row = tmpdiv;
5298#else
5299 first_row = r5or6_first_row = r0_first_row =
5300 first_block / stripesize;
5301 r5or6_last_row = r0_last_row = last_block / stripesize;
5302#endif
5303 if (r5or6_first_row != r5or6_last_row)
5304 return IO_ACCEL_INELIGIBLE;
5305
		/* Verify the request lies within a single column. */
5308#if BITS_PER_LONG == 32
5309 tmpdiv = first_block;
5310 first_row_offset = do_div(tmpdiv, stripesize);
5311 tmpdiv = first_row_offset;
5312 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5313 r5or6_first_row_offset = first_row_offset;
5314 tmpdiv = last_block;
5315 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5316 tmpdiv = r5or6_last_row_offset;
5317 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5318 tmpdiv = r5or6_first_row_offset;
5319 (void) do_div(tmpdiv, map->strip_size);
5320 first_column = r5or6_first_column = tmpdiv;
5321 tmpdiv = r5or6_last_row_offset;
5322 (void) do_div(tmpdiv, map->strip_size);
5323 r5or6_last_column = tmpdiv;
5324#else
5325 first_row_offset = r5or6_first_row_offset =
5326 (u32)((first_block % stripesize) %
5327 r5or6_blocks_per_row);
5328
5329 r5or6_last_row_offset =
5330 (u32)((last_block % stripesize) %
5331 r5or6_blocks_per_row);
5332
5333 first_column = r5or6_first_column =
5334 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5335 r5or6_last_column =
5336 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5337#endif
5338 if (r5or6_first_column != r5or6_last_column)
5339 return IO_ACCEL_INELIGIBLE;
5340
5341
5342 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5343 le16_to_cpu(map->row_cnt);
5344
5345 map_index = (first_group *
5346 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5347 (map_row * total_disks_per_row) + first_column;
5348 break;
5349 default:
5350 return IO_ACCEL_INELIGIBLE;
5351 }
5352
5353 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5354 return IO_ACCEL_INELIGIBLE;
5355
5356 c->phys_disk = dev->phys_disk[map_index];
5357 if (!c->phys_disk)
5358 return IO_ACCEL_INELIGIBLE;
5359
5360 disk_handle = dd[map_index].ioaccel_handle;
5361 disk_block = le64_to_cpu(map->disk_starting_blk) +
5362 first_row * le16_to_cpu(map->strip_size) +
5363 (first_row_offset - first_column *
5364 le16_to_cpu(map->strip_size));
5365 disk_block_cnt = block_cnt;
5366
	/* handle differing logical/physical block sizes */
5368 if (map->phys_blk_shift) {
5369 disk_block <<= map->phys_blk_shift;
5370 disk_block_cnt <<= map->phys_blk_shift;
5371 }
5372 BUG_ON(disk_block_cnt > 0xffff);
5373
	/* build the new CDB for the physical disk I/O */
5375 if (disk_block > 0xffffffff) {
5376 cdb[0] = is_write ? WRITE_16 : READ_16;
5377 cdb[1] = 0;
5378 cdb[2] = (u8) (disk_block >> 56);
5379 cdb[3] = (u8) (disk_block >> 48);
5380 cdb[4] = (u8) (disk_block >> 40);
5381 cdb[5] = (u8) (disk_block >> 32);
5382 cdb[6] = (u8) (disk_block >> 24);
5383 cdb[7] = (u8) (disk_block >> 16);
5384 cdb[8] = (u8) (disk_block >> 8);
5385 cdb[9] = (u8) (disk_block);
5386 cdb[10] = (u8) (disk_block_cnt >> 24);
5387 cdb[11] = (u8) (disk_block_cnt >> 16);
5388 cdb[12] = (u8) (disk_block_cnt >> 8);
5389 cdb[13] = (u8) (disk_block_cnt);
5390 cdb[14] = 0;
5391 cdb[15] = 0;
5392 cdb_len = 16;
5393 } else {
5394 cdb[0] = is_write ? WRITE_10 : READ_10;
5395 cdb[1] = 0;
5396 cdb[2] = (u8) (disk_block >> 24);
5397 cdb[3] = (u8) (disk_block >> 16);
5398 cdb[4] = (u8) (disk_block >> 8);
5399 cdb[5] = (u8) (disk_block);
5400 cdb[6] = 0;
5401 cdb[7] = (u8) (disk_block_cnt >> 8);
5402 cdb[8] = (u8) (disk_block_cnt);
5403 cdb[9] = 0;
5404 cdb_len = 10;
5405 }
5406 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5407 dev->scsi3addr,
5408 dev->phys_disk[map_index]);
5409}
5410
5411
5412
5413
5414
5415
5416static int hpsa_ciss_submit(struct ctlr_info *h,
5417 struct CommandList *c, struct scsi_cmnd *cmd,
5418 struct hpsa_scsi_dev_t *dev)
5419{
5420 cmd->host_scribble = (unsigned char *) c;
5421 c->cmd_type = CMD_SCSI;
5422 c->scsi_cmd = cmd;
5423 c->Header.ReplyQueue = 0;
5424 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
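	/* the tag encodes the command index so completions can be looked up directly */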
5425 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5426
5427
5428
5429 c->Request.Timeout = 0;
5430 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5431 c->Request.CDBLen = cmd->cmd_len;
5432 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5433 switch (cmd->sc_data_direction) {
5434 case DMA_TO_DEVICE:
5435 c->Request.type_attr_dir =
5436 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5437 break;
5438 case DMA_FROM_DEVICE:
5439 c->Request.type_attr_dir =
5440 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5441 break;
5442 case DMA_NONE:
5443 c->Request.type_attr_dir =
5444 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5445 break;
5446 case DMA_BIDIRECTIONAL:
		/*
		 * This can happen if a buggy application does a SCSI passthru
		 * and sets both inlen and outlen to non-zero values.
		 */
5452 c->Request.type_attr_dir =
5453 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/*
		 * Bidirectional transfers are not really supported; mark the
		 * direction as reserved and let the controller reject the
		 * command if necessary.
		 */
5462 break;
5463
5464 default:
5465 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5466 cmd->sc_data_direction);
5467 BUG();
5468 break;
5469 }
5470
5471 if (hpsa_scatter_gather(h, c, cmd) < 0) {
5472 hpsa_cmd_resolve_and_free(h, c);
5473 return SCSI_MLQUEUE_HOST_BUSY;
5474 }
5475
5476 if (dev->in_reset) {
5477 hpsa_cmd_resolve_and_free(h, c);
5478 return SCSI_MLQUEUE_HOST_BUSY;
5479 }
5480
5481 enqueue_cmd_and_start_io(h, c);
5482
5483 return 0;
5484}
5485
5486static void hpsa_cmd_init(struct ctlr_info *h, int index,
5487 struct CommandList *c)
5488{
5489 dma_addr_t cmd_dma_handle, err_dma_handle;
5490
	/* Zero out the CommandList up to (but not including) refcount. */
5492 memset(c, 0, offsetof(struct CommandList, refcount));
5493 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5494 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5495 c->err_info = h->errinfo_pool + index;
5496 memset(c->err_info, 0, sizeof(*c->err_info));
5497 err_dma_handle = h->errinfo_pool_dhandle
5498 + index * sizeof(*c->err_info);
5499 c->cmdindex = index;
5500 c->busaddr = (u32) cmd_dma_handle;
5501 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5502 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5503 c->h = h;
5504 c->scsi_cmd = SCSI_CMD_IDLE;
5505}
5506
5507static void hpsa_preinitialize_commands(struct ctlr_info *h)
5508{
5509 int i;
5510
5511 for (i = 0; i < h->nr_cmds; i++) {
5512 struct CommandList *c = h->cmd_pool + i;
5513
5514 hpsa_cmd_init(h, i, c);
5515 atomic_set(&c->refcount, 0);
5516 }
5517}
5518
5519static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5520 struct CommandList *c)
5521{
5522 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5523
5524 BUG_ON(c->cmdindex != index);
5525
5526 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5527 memset(c->err_info, 0, sizeof(*c->err_info));
5528 c->busaddr = (u32) cmd_dma_handle;
5529}
5530
5531static int hpsa_ioaccel_submit(struct ctlr_info *h,
5532 struct CommandList *c, struct scsi_cmnd *cmd)
5533{
5534 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5535 int rc = IO_ACCEL_INELIGIBLE;
5536
5537 if (!dev)
5538 return SCSI_MLQUEUE_HOST_BUSY;
5539
5540 if (dev->in_reset)
5541 return SCSI_MLQUEUE_HOST_BUSY;
5542
5543 if (hpsa_simple_mode)
5544 return IO_ACCEL_INELIGIBLE;
5545
5546 cmd->host_scribble = (unsigned char *) c;
5547
5548 if (dev->offload_enabled) {
5549 hpsa_cmd_init(h, c->cmdindex, c);
5550 c->cmd_type = CMD_SCSI;
5551 c->scsi_cmd = cmd;
5552 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5553 if (rc < 0)
5554 rc = SCSI_MLQUEUE_HOST_BUSY;
5555 } else if (dev->hba_ioaccel_enabled) {
5556 hpsa_cmd_init(h, c->cmdindex, c);
5557 c->cmd_type = CMD_SCSI;
5558 c->scsi_cmd = cmd;
5559 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5560 if (rc < 0)
5561 rc = SCSI_MLQUEUE_HOST_BUSY;
5562 }
5563 return rc;
5564}
5565
5566static void hpsa_command_resubmit_worker(struct work_struct *work)
5567{
5568 struct scsi_cmnd *cmd;
5569 struct hpsa_scsi_dev_t *dev;
5570 struct CommandList *c = container_of(work, struct CommandList, work);
5571
5572 cmd = c->scsi_cmd;
5573 dev = cmd->device->hostdata;
5574 if (!dev) {
5575 cmd->result = DID_NO_CONNECT << 16;
5576 return hpsa_cmd_free_and_done(c->h, c, cmd);
5577 }
5578
5579 if (dev->in_reset) {
5580 cmd->result = DID_RESET << 16;
5581 return hpsa_cmd_free_and_done(c->h, c, cmd);
5582 }
5583
5584 if (c->cmd_type == CMD_IOACCEL2) {
5585 struct ctlr_info *h = c->h;
5586 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5587 int rc;
5588
5589 if (c2->error_data.serv_response ==
5590 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5591 rc = hpsa_ioaccel_submit(h, c, cmd);
5592 if (rc == 0)
5593 return;
5594 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means DMA mapping
				 * failed.  Try again via the SCSI mid layer,
				 * which will then get SCSI_MLQUEUE_HOST_BUSY.
				 */
5600 cmd->result = DID_IMM_RETRY << 16;
5601 return hpsa_cmd_free_and_done(h, c, cmd);
5602 }
			/* else, fall through and resubmit down the CISS path */
5604 }
5605 }
5606 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5607 if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
		/*
		 * If we get here, it means DMA mapping failed.  Try again via
		 * the SCSI mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit() will have already freed c if it
		 * encountered a DMA mapping failure.
		 */
5616 cmd->result = DID_IMM_RETRY << 16;
5617 cmd->scsi_done(cmd);
5618 }
5619}
5620
/* Running in struct Scsi_Host->host_lock less mode */
5622static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5623{
5624 struct ctlr_info *h;
5625 struct hpsa_scsi_dev_t *dev;
5626 struct CommandList *c;
5627 int rc = 0;
5628
5629
5630 h = sdev_to_hba(cmd->device);
5631
5632 BUG_ON(cmd->request->tag < 0);
5633
5634 dev = cmd->device->hostdata;
5635 if (!dev) {
5636 cmd->result = DID_NO_CONNECT << 16;
5637 cmd->scsi_done(cmd);
5638 return 0;
5639 }
5640
5641 if (dev->removed) {
5642 cmd->result = DID_NO_CONNECT << 16;
5643 cmd->scsi_done(cmd);
5644 return 0;
5645 }
5646
5647 if (unlikely(lockup_detected(h))) {
5648 cmd->result = DID_NO_CONNECT << 16;
5649 cmd->scsi_done(cmd);
5650 return 0;
5651 }
5652
5653 if (dev->in_reset)
5654 return SCSI_MLQUEUE_DEVICE_BUSY;
5655
5656 c = cmd_tagged_alloc(h, cmd);
5657 if (c == NULL)
5658 return SCSI_MLQUEUE_DEVICE_BUSY;
5659
	/*
	 * Clear the result field; the SCSI midlayer does not zero it out
	 * during error recovery.
	 */
5664 cmd->result = 0;
5665
	/*
	 * Call the alternate submit routine for I/O-accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
5670 if (likely(cmd->retries == 0 &&
5671 !blk_rq_is_passthrough(cmd->request) &&
5672 h->acciopath_status)) {
5673 rc = hpsa_ioaccel_submit(h, c, cmd);
5674 if (rc == 0)
5675 return 0;
5676 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5677 hpsa_cmd_resolve_and_free(h, c);
5678 return SCSI_MLQUEUE_HOST_BUSY;
5679 }
5680 }
5681 return hpsa_ciss_submit(h, c, cmd, dev);
5682}
5683
5684static void hpsa_scan_complete(struct ctlr_info *h)
5685{
5686 unsigned long flags;
5687
5688 spin_lock_irqsave(&h->scan_lock, flags);
5689 h->scan_finished = 1;
5690 wake_up(&h->scan_wait_queue);
5691 spin_unlock_irqrestore(&h->scan_lock, flags);
5692}
5693
5694static void hpsa_scan_start(struct Scsi_Host *sh)
5695{
5696 struct ctlr_info *h = shost_to_hba(sh);
5697 unsigned long flags;
5698
	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked-up controller.
	 */
5705 if (unlikely(lockup_detected(h)))
5706 return hpsa_scan_complete(h);
5707
	/*
	 * If a scan is already waiting to run, there is no need to add another.
	 */
5711 spin_lock_irqsave(&h->scan_lock, flags);
5712 if (h->scan_waiting) {
5713 spin_unlock_irqrestore(&h->scan_lock, flags);
5714 return;
5715 }
5716
5717 spin_unlock_irqrestore(&h->scan_lock, flags);
5718
5719
5720 while (1) {
5721 spin_lock_irqsave(&h->scan_lock, flags);
5722 if (h->scan_finished)
5723 break;
5724 h->scan_waiting = 1;
5725 spin_unlock_irqrestore(&h->scan_lock, flags);
5726 wait_event(h->scan_wait_queue, h->scan_finished);
		/*
		 * Note: we don't need to worry about a race between this
		 * thread and driver unload, because the midlayer will have
		 * incremented the reference count, so unload won't happen
		 * while we're in here.
		 */
5732 }
5733 h->scan_finished = 0;
5734 h->scan_waiting = 0;
5735 spin_unlock_irqrestore(&h->scan_lock, flags);
5736
5737 if (unlikely(lockup_detected(h)))
5738 return hpsa_scan_complete(h);
5739
5740
5741
5742
5743 spin_lock_irqsave(&h->reset_lock, flags);
5744 if (h->reset_in_progress) {
5745 h->drv_req_rescan = 1;
5746 spin_unlock_irqrestore(&h->reset_lock, flags);
5747 hpsa_scan_complete(h);
5748 return;
5749 }
5750 spin_unlock_irqrestore(&h->reset_lock, flags);
5751
5752 hpsa_update_scsi_devices(h);
5753
5754 hpsa_scan_complete(h);
5755}
5756
5757static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5758{
5759 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5760
5761 if (!logical_drive)
5762 return -ENODEV;
5763
5764 if (qdepth < 1)
5765 qdepth = 1;
5766 else if (qdepth > logical_drive->queue_depth)
5767 qdepth = logical_drive->queue_depth;
5768
5769 return scsi_change_queue_depth(sdev, qdepth);
5770}
5771
5772static int hpsa_scan_finished(struct Scsi_Host *sh,
5773 unsigned long elapsed_time)
5774{
5775 struct ctlr_info *h = shost_to_hba(sh);
5776 unsigned long flags;
5777 int finished;
5778
5779 spin_lock_irqsave(&h->scan_lock, flags);
5780 finished = h->scan_finished;
5781 spin_unlock_irqrestore(&h->scan_lock, flags);
5782 return finished;
5783}
5784
5785static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5786{
5787 struct Scsi_Host *sh;
5788
5789 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5790 if (sh == NULL) {
5791 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5792 return -ENOMEM;
5793 }
5794
5795 sh->io_port = 0;
5796 sh->n_io_port = 0;
5797 sh->this_id = -1;
5798 sh->max_channel = 3;
5799 sh->max_cmd_len = MAX_COMMAND_SIZE;
5800 sh->max_lun = HPSA_MAX_LUN;
5801 sh->max_id = HPSA_MAX_LUN;
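	/* leave room for commands reserved for driver-internal use */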
5802 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5803 sh->cmd_per_lun = sh->can_queue;
5804 sh->sg_tablesize = h->maxsgentries;
5805 sh->transportt = hpsa_sas_transport_template;
5806 sh->hostdata[0] = (unsigned long) h;
5807 sh->irq = pci_irq_vector(h->pdev, 0);
5808 sh->unique_id = sh->irq;
5809
5810 h->scsi_host = sh;
5811 return 0;
5812}
5813
5814static int hpsa_scsi_add_host(struct ctlr_info *h)
5815{
5816 int rv;
5817
5818 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5819 if (rv) {
5820 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5821 return rv;
5822 }
5823 scsi_scan_host(h->scsi_host);
5824 return 0;
5825}
5826
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve
 * the low-numbered entries for our own uses.)
 */
5833static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5834{
5835 int idx = scmd->request->tag;
5836
5837 if (idx < 0)
5838 return idx;
5839
	/* Offset to leave space for internal cmds. */
5841 return idx += HPSA_NRESERVED_CMDS;
5842}
5843
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
5848static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5849 struct CommandList *c, unsigned char lunaddr[],
5850 int reply_queue)
5851{
5852 int rc;
5853
	/* Send the Test Unit Ready; fill_cmd() can't fail, no mapping needed. */
5855 (void) fill_cmd(c, TEST_UNIT_READY, h,
5856 NULL, 0, 0, lunaddr, TYPE_CMD);
5857 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5858 if (rc)
5859 return rc;
5860
5861
5862
5863 if (c->err_info->CommandStatus == CMD_SUCCESS)
5864 return 0;
5865
	/*
	 * The first command sent after a reset typically receives a "unit
	 * attention" check condition to indicate the LUN was reset; treat
	 * that (and NO SENSE) as the unit being ready.
	 */
5871 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5872 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5873 (c->err_info->SenseInfo[2] == NO_SENSE ||
5874 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5875 return 0;
5876
5877 return 1;
5878}
5879
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
5884static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5885 struct CommandList *c,
5886 unsigned char lunaddr[], int reply_queue)
5887{
5888 int rc;
5889 int count = 0;
5890 int waittime = 1;
5891
5892
5893 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
		/*
		 * Wait for a bit.  Do this first, because if we send the TUR
		 * right away, the reset will just abort it.
		 */
5899 msleep(1000 * waittime);
5900
5901 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5902 if (!rc)
5903 break;
5904
5905
5906 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5907 waittime *= 2;
5908
5909 dev_warn(&h->pdev->dev,
5910 "waiting %d secs for device to become ready.\n",
5911 waittime);
5912 }
5913
5914 return rc;
5915}
5916
5917static int wait_for_device_to_become_ready(struct ctlr_info *h,
5918 unsigned char lunaddr[],
5919 int reply_queue)
5920{
5921 int first_queue;
5922 int last_queue;
5923 int rq;
5924 int rc = 0;
5925 struct CommandList *c;
5926
5927 c = cmd_alloc(h);
5928
	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise
	 * execute the loop exactly once using only the specified queue.
	 */
5934 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5935 first_queue = 0;
5936 last_queue = h->nreply_queues - 1;
5937 } else {
5938 first_queue = reply_queue;
5939 last_queue = reply_queue;
5940 }
5941
5942 for (rq = first_queue; rq <= last_queue; rq++) {
5943 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5944 if (rc)
5945 break;
5946 }
5947
5948 if (rc)
5949 dev_warn(&h->pdev->dev, "giving up on device.\n");
5950 else
5951 dev_warn(&h->pdev->dev, "device is ready.\n");
5952
5953 cmd_free(h, c);
5954 return rc;
5955}
5956
5957
5958
5959
5960static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5961{
5962 int rc = SUCCESS;
5963 int i;
5964 struct ctlr_info *h;
5965 struct hpsa_scsi_dev_t *dev = NULL;
5966 u8 reset_type;
5967 char msg[48];
5968 unsigned long flags;
5969
5970
5971 h = sdev_to_hba(scsicmd->device);
5972 if (h == NULL)
5973 return FAILED;
5974
5975 spin_lock_irqsave(&h->reset_lock, flags);
5976 h->reset_in_progress = 1;
5977 spin_unlock_irqrestore(&h->reset_lock, flags);
5978
5979 if (lockup_detected(h)) {
5980 rc = FAILED;
5981 goto return_reset_status;
5982 }
5983
5984 dev = scsicmd->device->hostdata;
5985 if (!dev) {
5986 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5987 rc = FAILED;
5988 goto return_reset_status;
5989 }
5990
5991 if (dev->devtype == TYPE_ENCLOSURE) {
5992 rc = SUCCESS;
5993 goto return_reset_status;
5994 }
5995
	/* if the controller locked up, we can guarantee the command won't complete */
5997 if (lockup_detected(h)) {
5998 snprintf(msg, sizeof(msg),
5999 "cmd %d RESET FAILED, lockup detected",
6000 hpsa_get_cmd_index(scsicmd));
6001 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6002 rc = FAILED;
6003 goto return_reset_status;
6004 }
6005
	/* this reset request might be the result of a lockup; check */
6007 if (detect_controller_lockup(h)) {
6008 snprintf(msg, sizeof(msg),
6009 "cmd %d RESET FAILED, new lockup detected",
6010 hpsa_get_cmd_index(scsicmd));
6011 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6012 rc = FAILED;
6013 goto return_reset_status;
6014 }
6015
	/* Do not attempt to reset the controller device itself. */
6017 if (is_hba_lunid(dev->scsi3addr)) {
6018 rc = SUCCESS;
6019 goto return_reset_status;
6020 }
6021
6022 if (is_logical_dev_addr_mode(dev->scsi3addr))
6023 reset_type = HPSA_DEVICE_RESET_MSG;
6024 else
6025 reset_type = HPSA_PHYS_TARGET_RESET;
6026
6027 sprintf(msg, "resetting %s",
6028 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6029 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6030
	/*
	 * Block new commands to this device, then wait a bit to see if any
	 * outstanding commands complete before sending the reset.
	 */
6034 dev->in_reset = true;
6035 for (i = 0; i < 10; i++) {
6036 if (atomic_read(&dev->commands_outstanding) > 0)
6037 msleep(1000);
6038 else
6039 break;
6040 }
6041
6042
6043 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6044 if (rc == 0)
6045 rc = SUCCESS;
6046 else
6047 rc = FAILED;
6048
6049 sprintf(msg, "reset %s %s",
6050 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6051 rc == SUCCESS ? "completed successfully" : "failed");
6052 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6053
6054return_reset_status:
6055 spin_lock_irqsave(&h->reset_lock, flags);
6056 h->reset_in_progress = 0;
6057 if (dev)
6058 dev->in_reset = false;
6059 spin_unlock_irqrestore(&h->reset_lock, flags);
6060 return rc;
6061}
6062
6063
6064
6065
6066
6067
6068
6069static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6070 struct scsi_cmnd *scmd)
6071{
6072 int idx = hpsa_get_cmd_index(scmd);
6073 struct CommandList *c = h->cmd_pool + idx;
6074
6075 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6076 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6077 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/*
		 * The index value comes from the block layer, so if it's out
		 * of bounds, it's probably not our bug.
		 */
6081 BUG();
6082 }
6083
6084 if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * The SCSI layer is expected to hand us a unique tag, so a
		 * busy command block here means the previous command with
		 * this tag is somehow still outstanding.  Warn (avoiding
		 * repeated messages for the same tag) and return NULL so the
		 * request is retried.
		 */
6091 if (idx != h->last_collision_tag) {
6092 dev_warn(&h->pdev->dev,
6093 "%s: tag collision (tag=%d)\n", __func__, idx);
6094 if (scmd)
6095 scsi_print_command(scmd);
6096 h->last_collision_tag = idx;
6097 }
6098 return NULL;
6099 }
6100
6101 atomic_inc(&c->refcount);
6102
6103 hpsa_cmd_partial_init(h, idx, c);
6104 return c;
6105}
6106
6107static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6108{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.
	 */
6113 (void)atomic_dec(&c->refcount);
6114}
6115
/*
 * For operations that cannot sleep, a command block is allocated at init
 * and managed by cmd_alloc() and cmd_free(), using a simple bitmap to track
 * which ones are free or in use.
 *
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
6125static struct CommandList *cmd_alloc(struct ctlr_info *h)
6126{
6127 struct CommandList *c;
6128 int refcount, i;
6129 int offset = 0;
6130
	/*
	 * There is some *extremely* small but non-zero chance that multiple
	 * threads could get in here, and one thread could be scanning through
	 * the list of bits looking for a free one while other threads sneak
	 * in behind it and grab them first, so that a very unlucky thread
	 * might be starved.  In reality this happens so infrequently as to be
	 * indistinguishable from never.
	 *
	 * Only the low HPSA_NRESERVED_CMDS indexes are handed out here; they
	 * are reserved for driver-initiated requests, while requests from the
	 * block layer use the higher, tag-based indexes via cmd_tagged_alloc().
	 */
6150 for (;;) {
6151 i = find_next_zero_bit(h->cmd_pool_bits,
6152 HPSA_NRESERVED_CMDS,
6153 offset);
6154 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6155 offset = 0;
6156 continue;
6157 }
6158 c = h->cmd_pool + i;
6159 refcount = atomic_inc_return(&c->refcount);
6160 if (unlikely(refcount > 1)) {
6161 cmd_free(h, c);
6162 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6163 continue;
6164 }
6165 set_bit(i & (BITS_PER_LONG - 1),
6166 h->cmd_pool_bits + (i / BITS_PER_LONG));
6167 break;
6168 }
6169 hpsa_cmd_partial_init(h, i, c);
6170 c->device = NULL;
6171 return c;
6172}
6173
/*
 * This is the complementary operation to cmd_alloc().  In some corner cases
 * it may also be used to free blocks allocated by cmd_tagged_alloc(), in
 * which case the reference-count decrement does the work and clearing an
 * already-clear bitmap bit is harmless.
 */
6180static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6181{
6182 if (atomic_dec_and_test(&c->refcount)) {
6183 int i;
6184
6185 i = c - h->cmd_pool;
6186 clear_bit(i & (BITS_PER_LONG - 1),
6187 h->cmd_pool_bits + (i / BITS_PER_LONG));
6188 }
6189}
6190
6191#ifdef CONFIG_COMPAT
6192
6193static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6194 void __user *arg)
6195{
6196 IOCTL32_Command_struct __user *arg32 =
6197 (IOCTL32_Command_struct __user *) arg;
6198 IOCTL_Command_struct arg64;
6199 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6200 int err;
6201 u32 cp;
6202
6203 memset(&arg64, 0, sizeof(arg64));
6204 err = 0;
6205 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6206 sizeof(arg64.LUN_info));
6207 err |= copy_from_user(&arg64.Request, &arg32->Request,
6208 sizeof(arg64.Request));
6209 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6210 sizeof(arg64.error_info));
6211 err |= get_user(arg64.buf_size, &arg32->buf_size);
6212 err |= get_user(cp, &arg32->buf);
6213 arg64.buf = compat_ptr(cp);
6214 err |= copy_to_user(p, &arg64, sizeof(arg64));
6215
6216 if (err)
6217 return -EFAULT;
6218
6219 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6220 if (err)
6221 return err;
6222 err |= copy_in_user(&arg32->error_info, &p->error_info,
6223 sizeof(arg32->error_info));
6224 if (err)
6225 return -EFAULT;
6226 return err;
6227}
6228
6229static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6230 unsigned int cmd, void __user *arg)
6231{
6232 BIG_IOCTL32_Command_struct __user *arg32 =
6233 (BIG_IOCTL32_Command_struct __user *) arg;
6234 BIG_IOCTL_Command_struct arg64;
6235 BIG_IOCTL_Command_struct __user *p =
6236 compat_alloc_user_space(sizeof(arg64));
6237 int err;
6238 u32 cp;
6239
6240 memset(&arg64, 0, sizeof(arg64));
6241 err = 0;
6242 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6243 sizeof(arg64.LUN_info));
6244 err |= copy_from_user(&arg64.Request, &arg32->Request,
6245 sizeof(arg64.Request));
6246 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6247 sizeof(arg64.error_info));
6248 err |= get_user(arg64.buf_size, &arg32->buf_size);
6249 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6250 err |= get_user(cp, &arg32->buf);
6251 arg64.buf = compat_ptr(cp);
6252 err |= copy_to_user(p, &arg64, sizeof(arg64));
6253
6254 if (err)
6255 return -EFAULT;
6256
6257 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6258 if (err)
6259 return err;
6260 err |= copy_in_user(&arg32->error_info, &p->error_info,
6261 sizeof(arg32->error_info));
6262 if (err)
6263 return -EFAULT;
6264 return err;
6265}
6266
6267static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6268 void __user *arg)
6269{
6270 switch (cmd) {
6271 case CCISS_GETPCIINFO:
6272 case CCISS_GETINTINFO:
6273 case CCISS_SETINTINFO:
6274 case CCISS_GETNODENAME:
6275 case CCISS_SETNODENAME:
6276 case CCISS_GETHEARTBEAT:
6277 case CCISS_GETBUSTYPES:
6278 case CCISS_GETFIRMVER:
6279 case CCISS_GETDRIVVER:
6280 case CCISS_REVALIDVOLS:
6281 case CCISS_DEREGDISK:
6282 case CCISS_REGNEWDISK:
6283 case CCISS_REGNEWD:
6284 case CCISS_RESCANDISK:
6285 case CCISS_GETLUNINFO:
6286 return hpsa_ioctl(dev, cmd, arg);
6287
6288 case CCISS_PASSTHRU32:
6289 return hpsa_ioctl32_passthru(dev, cmd, arg);
6290 case CCISS_BIG_PASSTHRU32:
6291 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6292
6293 default:
6294 return -ENOIOCTLCMD;
6295 }
6296}
6297#endif
6298
6299static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6300{
6301 struct hpsa_pci_info pciinfo;
6302
6303 if (!argp)
6304 return -EINVAL;
6305 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6306 pciinfo.bus = h->pdev->bus->number;
6307 pciinfo.dev_fn = h->pdev->devfn;
6308 pciinfo.board_id = h->board_id;
6309 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6310 return -EFAULT;
6311 return 0;
6312}
6313
6314static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6315{
6316 DriverVer_type DriverVer;
6317 unsigned char vmaj, vmin, vsubmin;
6318 int rc;
6319
6320 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6321 &vmaj, &vmin, &vsubmin);
6322 if (rc != 3) {
6323 dev_info(&h->pdev->dev, "driver version string '%s' "
6324 "unrecognized.", HPSA_DRIVER_VERSION);
6325 vmaj = 0;
6326 vmin = 0;
6327 vsubmin = 0;
6328 }
6329 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6330 if (!argp)
6331 return -EINVAL;
6332 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6333 return -EFAULT;
6334 return 0;
6335}
6336
6337static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6338{
6339 IOCTL_Command_struct iocommand;
6340 struct CommandList *c;
6341 char *buff = NULL;
6342 u64 temp64;
6343 int rc = 0;
6344
6345 if (!argp)
6346 return -EINVAL;
6347 if (!capable(CAP_SYS_RAWIO))
6348 return -EPERM;
6349 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6350 return -EFAULT;
6351 if ((iocommand.buf_size < 1) &&
6352 (iocommand.Request.Type.Direction != XFER_NONE)) {
6353 return -EINVAL;
6354 }
6355 if (iocommand.buf_size > 0) {
6356 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6357 if (buff == NULL)
6358 return -ENOMEM;
6359 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6360
6361 if (copy_from_user(buff, iocommand.buf,
6362 iocommand.buf_size)) {
6363 rc = -EFAULT;
6364 goto out_kfree;
6365 }
6366 } else {
6367 memset(buff, 0, iocommand.buf_size);
6368 }
6369 }
6370 c = cmd_alloc(h);
6371
6372
6373 c->cmd_type = CMD_IOCTL_PEND;
6374 c->scsi_cmd = SCSI_CMD_BUSY;
6375
6376 c->Header.ReplyQueue = 0;
6377 if (iocommand.buf_size > 0) {
6378 c->Header.SGList = 1;
6379 c->Header.SGTotal = cpu_to_le16(1);
6380 } else {
6381 c->Header.SGList = 0;
6382 c->Header.SGTotal = cpu_to_le16(0);
6383 }
6384 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6385
6386
6387 memcpy(&c->Request, &iocommand.Request,
6388 sizeof(c->Request));
6389
6390
6391 if (iocommand.buf_size > 0) {
6392 temp64 = dma_map_single(&h->pdev->dev, buff,
6393 iocommand.buf_size, DMA_BIDIRECTIONAL);
6394 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6395 c->SG[0].Addr = cpu_to_le64(0);
6396 c->SG[0].Len = cpu_to_le32(0);
6397 rc = -ENOMEM;
6398 goto out;
6399 }
6400 c->SG[0].Addr = cpu_to_le64(temp64);
6401 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6402 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
6403 }
6404 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6405 NO_TIMEOUT);
6406 if (iocommand.buf_size > 0)
6407 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6408 check_ioctl_unit_attention(h, c);
6409 if (rc) {
6410 rc = -EIO;
6411 goto out;
6412 }
6413
6414
6415 memcpy(&iocommand.error_info, c->err_info,
6416 sizeof(iocommand.error_info));
6417 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6418 rc = -EFAULT;
6419 goto out;
6420 }
6421 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6422 iocommand.buf_size > 0) {
6423
6424 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6425 rc = -EFAULT;
6426 goto out;
6427 }
6428 }
6429out:
6430 cmd_free(h, c);
6431out_kfree:
6432 kfree(buff);
6433 return rc;
6434}
6435
6436static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6437{
6438 BIG_IOCTL_Command_struct *ioc;
6439 struct CommandList *c;
6440 unsigned char **buff = NULL;
6441 int *buff_size = NULL;
6442 u64 temp64;
6443 BYTE sg_used = 0;
6444 int status = 0;
6445 u32 left;
6446 u32 sz;
6447 BYTE __user *data_ptr;
6448
6449 if (!argp)
6450 return -EINVAL;
6451 if (!capable(CAP_SYS_RAWIO))
6452 return -EPERM;
6453 ioc = vmemdup_user(argp, sizeof(*ioc));
6454 if (IS_ERR(ioc)) {
6455 status = PTR_ERR(ioc);
6456 goto cleanup1;
6457 }
6458 if ((ioc->buf_size < 1) &&
6459 (ioc->Request.Type.Direction != XFER_NONE)) {
6460 status = -EINVAL;
6461 goto cleanup1;
6462 }
6463
6464 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6465 status = -EINVAL;
6466 goto cleanup1;
6467 }
6468 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6469 status = -EINVAL;
6470 goto cleanup1;
6471 }
6472 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6473 if (!buff) {
6474 status = -ENOMEM;
6475 goto cleanup1;
6476 }
6477 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6478 if (!buff_size) {
6479 status = -ENOMEM;
6480 goto cleanup1;
6481 }
6482 left = ioc->buf_size;
6483 data_ptr = ioc->buf;
6484 while (left) {
6485 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6486 buff_size[sg_used] = sz;
6487 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6488 if (buff[sg_used] == NULL) {
6489 status = -ENOMEM;
6490 goto cleanup1;
6491 }
6492 if (ioc->Request.Type.Direction & XFER_WRITE) {
6493 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6494 status = -EFAULT;
6495 goto cleanup1;
6496 }
6497 } else
6498 memset(buff[sg_used], 0, sz);
6499 left -= sz;
6500 data_ptr += sz;
6501 sg_used++;
6502 }
6503 c = cmd_alloc(h);
6504
6505 c->cmd_type = CMD_IOCTL_PEND;
6506 c->scsi_cmd = SCSI_CMD_BUSY;
6507 c->Header.ReplyQueue = 0;
6508 c->Header.SGList = (u8) sg_used;
6509 c->Header.SGTotal = cpu_to_le16(sg_used);
6510 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6511 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6512 if (ioc->buf_size > 0) {
6513 int i;
6514 for (i = 0; i < sg_used; i++) {
6515 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6516 buff_size[i], DMA_BIDIRECTIONAL);
6517 if (dma_mapping_error(&h->pdev->dev,
6518 (dma_addr_t) temp64)) {
6519 c->SG[i].Addr = cpu_to_le64(0);
6520 c->SG[i].Len = cpu_to_le32(0);
6521 hpsa_pci_unmap(h->pdev, c, i,
6522 DMA_BIDIRECTIONAL);
6523 status = -ENOMEM;
6524 goto cleanup0;
6525 }
6526 c->SG[i].Addr = cpu_to_le64(temp64);
6527 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6528 c->SG[i].Ext = cpu_to_le32(0);
6529 }
6530 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6531 }
6532 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6533 NO_TIMEOUT);
6534 if (sg_used)
6535 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6536 check_ioctl_unit_attention(h, c);
6537 if (status) {
6538 status = -EIO;
6539 goto cleanup0;
6540 }
6541
6542
6543 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6544 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6545 status = -EFAULT;
6546 goto cleanup0;
6547 }
6548 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6549 int i;
6550
6551
6552 BYTE __user *ptr = ioc->buf;
6553 for (i = 0; i < sg_used; i++) {
6554 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6555 status = -EFAULT;
6556 goto cleanup0;
6557 }
6558 ptr += buff_size[i];
6559 }
6560 }
6561 status = 0;
6562cleanup0:
6563 cmd_free(h, c);
6564cleanup1:
6565 if (buff) {
6566 int i;
6567
6568 for (i = 0; i < sg_used; i++)
6569 kfree(buff[i]);
6570 kfree(buff);
6571 }
6572 kfree(buff_size);
6573 kvfree(ioc);
6574 return status;
6575}
6576
6577static void check_ioctl_unit_attention(struct ctlr_info *h,
6578 struct CommandList *c)
6579{
6580 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6581 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6582 (void) check_for_unit_attention(h, c);
6583}
6584
6585
6586
6587
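/*
 * ioctl entry point.  The register/deregister disk ioctls simply trigger
 * a rescan; the two passthru ioctls are throttled via passthru_cmds_avail
 * so that user space cannot consume every command slot.
 */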
6588static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6589 void __user *arg)
6590{
6591 struct ctlr_info *h;
6592 void __user *argp = (void __user *)arg;
6593 int rc;
6594
6595 h = sdev_to_hba(dev);
6596
6597 switch (cmd) {
6598 case CCISS_DEREGDISK:
6599 case CCISS_REGNEWDISK:
6600 case CCISS_REGNEWD:
6601 hpsa_scan_start(h->scsi_host);
6602 return 0;
6603 case CCISS_GETPCIINFO:
6604 return hpsa_getpciinfo_ioctl(h, argp);
6605 case CCISS_GETDRIVVER:
6606 return hpsa_getdrivver_ioctl(h, argp);
6607 case CCISS_PASSTHRU:
6608 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6609 return -EAGAIN;
6610 rc = hpsa_passthru_ioctl(h, argp);
6611 atomic_inc(&h->passthru_cmds_avail);
6612 return rc;
6613 case CCISS_BIG_PASSTHRU:
6614 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6615 return -EAGAIN;
6616 rc = hpsa_big_passthru_ioctl(h, argp);
6617 atomic_inc(&h->passthru_cmds_avail);
6618 return rc;
6619 default:
6620 return -ENOTTY;
6621 }
6622}
6623
6624static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6625{
6626 struct CommandList *c;
6627
6628 c = cmd_alloc(h);
6629
6630
6631 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6632 RAID_CTLR_LUNID, TYPE_MSG);
6633 c->Request.CDB[1] = reset_type;
6634 c->waiting = NULL;
6635 enqueue_cmd_and_start_io(h, c);
6636
6637
6638
6639
6640 return;
6641}
6642
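/*
 * Fill in a CommandList for either a CISS command (TYPE_CMD) or a
 * controller message (TYPE_MSG), then DMA-map the supplied buffer.
 * Unknown opcodes or command types are a programming error and hit BUG().
 * Returns 0 on success, -1 if the buffer could not be mapped.
 */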
6643static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6644 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6645 int cmd_type)
6646{
6647 enum dma_data_direction dir = DMA_NONE;
6648
6649 c->cmd_type = CMD_IOCTL_PEND;
6650 c->scsi_cmd = SCSI_CMD_BUSY;
6651 c->Header.ReplyQueue = 0;
6652 if (buff != NULL && size > 0) {
6653 c->Header.SGList = 1;
6654 c->Header.SGTotal = cpu_to_le16(1);
6655 } else {
6656 c->Header.SGList = 0;
6657 c->Header.SGTotal = cpu_to_le16(0);
6658 }
6659 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6660
6661 if (cmd_type == TYPE_CMD) {
6662 switch (cmd) {
6663 case HPSA_INQUIRY:
6664
6665 if (page_code & VPD_PAGE) {
6666 c->Request.CDB[1] = 0x01;
6667 c->Request.CDB[2] = (page_code & 0xff);
6668 }
6669 c->Request.CDBLen = 6;
6670 c->Request.type_attr_dir =
6671 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6672 c->Request.Timeout = 0;
6673 c->Request.CDB[0] = HPSA_INQUIRY;
6674 c->Request.CDB[4] = size & 0xFF;
6675 break;
6676 case RECEIVE_DIAGNOSTIC:
6677 c->Request.CDBLen = 6;
6678 c->Request.type_attr_dir =
6679 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6680 c->Request.Timeout = 0;
6681 c->Request.CDB[0] = cmd;
6682 c->Request.CDB[1] = 1;
6683 c->Request.CDB[2] = 1;
6684 c->Request.CDB[3] = (size >> 8) & 0xFF;
6685 c->Request.CDB[4] = size & 0xFF;
6686 break;
6687 case HPSA_REPORT_LOG:
6688 case HPSA_REPORT_PHYS:
6689
6690
6691
6692 c->Request.CDBLen = 12;
6693 c->Request.type_attr_dir =
6694 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6695 c->Request.Timeout = 0;
6696 c->Request.CDB[0] = cmd;
6697 c->Request.CDB[6] = (size >> 24) & 0xFF;
6698 c->Request.CDB[7] = (size >> 16) & 0xFF;
6699 c->Request.CDB[8] = (size >> 8) & 0xFF;
6700 c->Request.CDB[9] = size & 0xFF;
6701 break;
6702 case BMIC_SENSE_DIAG_OPTIONS:
6703 c->Request.CDBLen = 16;
6704 c->Request.type_attr_dir =
6705 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6706 c->Request.Timeout = 0;
6707
6708 c->Request.CDB[0] = BMIC_READ;
6709 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6710 break;
6711 case BMIC_SET_DIAG_OPTIONS:
6712 c->Request.CDBLen = 16;
6713 c->Request.type_attr_dir =
6714 TYPE_ATTR_DIR(cmd_type,
6715 ATTR_SIMPLE, XFER_WRITE);
6716 c->Request.Timeout = 0;
6717 c->Request.CDB[0] = BMIC_WRITE;
6718 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6719 break;
6720 case HPSA_CACHE_FLUSH:
6721 c->Request.CDBLen = 12;
6722 c->Request.type_attr_dir =
6723 TYPE_ATTR_DIR(cmd_type,
6724 ATTR_SIMPLE, XFER_WRITE);
6725 c->Request.Timeout = 0;
6726 c->Request.CDB[0] = BMIC_WRITE;
6727 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6728 c->Request.CDB[7] = (size >> 8) & 0xFF;
6729 c->Request.CDB[8] = size & 0xFF;
6730 break;
6731 case TEST_UNIT_READY:
6732 c->Request.CDBLen = 6;
6733 c->Request.type_attr_dir =
6734 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6735 c->Request.Timeout = 0;
6736 break;
6737 case HPSA_GET_RAID_MAP:
6738 c->Request.CDBLen = 12;
6739 c->Request.type_attr_dir =
6740 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6741 c->Request.Timeout = 0;
6742 c->Request.CDB[0] = HPSA_CISS_READ;
6743 c->Request.CDB[1] = cmd;
6744 c->Request.CDB[6] = (size >> 24) & 0xFF;
6745 c->Request.CDB[7] = (size >> 16) & 0xFF;
6746 c->Request.CDB[8] = (size >> 8) & 0xFF;
6747 c->Request.CDB[9] = size & 0xFF;
6748 break;
6749 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6750 c->Request.CDBLen = 10;
6751 c->Request.type_attr_dir =
6752 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6753 c->Request.Timeout = 0;
6754 c->Request.CDB[0] = BMIC_READ;
6755 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6756 c->Request.CDB[7] = (size >> 16) & 0xFF;
6757 c->Request.CDB[8] = (size >> 8) & 0xFF;
6758 break;
6759 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6760 c->Request.CDBLen = 10;
6761 c->Request.type_attr_dir =
6762 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6763 c->Request.Timeout = 0;
6764 c->Request.CDB[0] = BMIC_READ;
6765 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6766 c->Request.CDB[7] = (size >> 16) & 0xFF;
6767 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6768 break;
6769 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6770 c->Request.CDBLen = 10;
6771 c->Request.type_attr_dir =
6772 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6773 c->Request.Timeout = 0;
6774 c->Request.CDB[0] = BMIC_READ;
6775 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6776 c->Request.CDB[7] = (size >> 16) & 0xFF;
6777 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6778 break;
6779 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6780 c->Request.CDBLen = 10;
6781 c->Request.type_attr_dir =
6782 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6783 c->Request.Timeout = 0;
6784 c->Request.CDB[0] = BMIC_READ;
6785 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6786 c->Request.CDB[7] = (size >> 16) & 0xFF;
6787 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6788 break;
6789 case BMIC_IDENTIFY_CONTROLLER:
6790 c->Request.CDBLen = 10;
6791 c->Request.type_attr_dir =
6792 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6793 c->Request.Timeout = 0;
6794 c->Request.CDB[0] = BMIC_READ;
6795 c->Request.CDB[1] = 0;
6796 c->Request.CDB[2] = 0;
6797 c->Request.CDB[3] = 0;
6798 c->Request.CDB[4] = 0;
6799 c->Request.CDB[5] = 0;
6800 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6801 c->Request.CDB[7] = (size >> 16) & 0xFF;
6802 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6803 c->Request.CDB[9] = 0;
6804 break;
6805 default:
6806 			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6807 BUG();
6808 }
6809 } else if (cmd_type == TYPE_MSG) {
6810 switch (cmd) {
6811
6812 case HPSA_PHYS_TARGET_RESET:
6813 c->Request.CDBLen = 16;
6814 c->Request.type_attr_dir =
6815 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6816 c->Request.Timeout = 0;
6817 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6818 c->Request.CDB[0] = HPSA_RESET;
6819 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6820
6821 c->Request.CDB[4] = 0x00;
6822 c->Request.CDB[5] = 0x00;
6823 c->Request.CDB[6] = 0x00;
6824 c->Request.CDB[7] = 0x00;
6825 break;
6826 case HPSA_DEVICE_RESET_MSG:
6827 c->Request.CDBLen = 16;
6828 c->Request.type_attr_dir =
6829 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6830 c->Request.Timeout = 0;
6831 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6832 c->Request.CDB[0] = cmd;
6833 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6834
6835
6836 c->Request.CDB[4] = 0x00;
6837 c->Request.CDB[5] = 0x00;
6838 c->Request.CDB[6] = 0x00;
6839 c->Request.CDB[7] = 0x00;
6840 break;
6841 default:
6842 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6843 cmd);
6844 BUG();
6845 }
6846 } else {
6847 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6848 BUG();
6849 }
6850
6851 switch (GET_DIR(c->Request.type_attr_dir)) {
6852 case XFER_READ:
6853 dir = DMA_FROM_DEVICE;
6854 break;
6855 case XFER_WRITE:
6856 dir = DMA_TO_DEVICE;
6857 break;
6858 case XFER_NONE:
6859 dir = DMA_NONE;
6860 break;
6861 default:
6862 dir = DMA_BIDIRECTIONAL;
6863 }
6864 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6865 return -1;
6866 return 0;
6867}
6868
6869
6870
6871
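/* ioremap a region whose base address need not be page aligned */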
6872static void __iomem *remap_pci_mem(ulong base, ulong size)
6873{
6874 ulong page_base = ((ulong) base) & PAGE_MASK;
6875 ulong page_offs = ((ulong) base) - page_base;
6876 void __iomem *page_remapped = ioremap_nocache(page_base,
6877 page_offs + size);
6878
6879 return page_remapped ? (page_remapped + page_offs) : NULL;
6880}
6881
6882static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6883{
6884 return h->access.command_completed(h, q);
6885}
6886
6887static inline bool interrupt_pending(struct ctlr_info *h)
6888{
6889 return h->access.intr_pending(h);
6890}
6891
6892static inline long interrupt_not_for_us(struct ctlr_info *h)
6893{
6894 return (h->access.intr_pending(h) == 0) ||
6895 (h->interrupts_enabled == 0);
6896}
6897
6898static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6899 u32 raw_tag)
6900{
6901 if (unlikely(tag_index >= h->nr_cmds)) {
6902 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6903 return 1;
6904 }
6905 return 0;
6906}
6907
6908static inline void finish_cmd(struct CommandList *c)
6909{
6910 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6911 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6912 || c->cmd_type == CMD_IOACCEL2))
6913 complete_scsi_command(c);
6914 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6915 complete(c->waiting);
6916}
6917
6918
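/*
 * Process completion of an indexed ("direct lookup") command: the upper
 * bits of the raw tag are the command's index into h->cmd_pool.
 */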
6919static inline void process_indexed_cmd(struct ctlr_info *h,
6920 u32 raw_tag)
6921{
6922 u32 tag_index;
6923 struct CommandList *c;
6924
6925 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6926 if (!bad_tag(h, tag_index, raw_tag)) {
6927 c = h->cmd_pool + tag_index;
6928 finish_cmd(c);
6929 }
6930}
6931
6932
6933
6934
6935
6936
6937static int ignore_bogus_interrupt(struct ctlr_info *h)
6938{
6939 if (likely(!reset_devices))
6940 return 0;
6941
6942 if (likely(h->interrupts_enabled))
6943 return 0;
6944
6945 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6946 "(known firmware bug.) Ignoring.\n");
6947
6948 return 1;
6949}
6950
6951
6952
6953
6954
6955
6956static struct ctlr_info *queue_to_hba(u8 *queue)
6957{
6958 return container_of((queue - *queue), struct ctlr_info, q[0]);
6959}
6960
6961static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6962{
6963 struct ctlr_info *h = queue_to_hba(queue);
6964 u8 q = *(u8 *) queue;
6965 u32 raw_tag;
6966
6967 if (ignore_bogus_interrupt(h))
6968 return IRQ_NONE;
6969
6970 if (interrupt_not_for_us(h))
6971 return IRQ_NONE;
6972 h->last_intr_timestamp = get_jiffies_64();
6973 while (interrupt_pending(h)) {
6974 raw_tag = get_next_completion(h, q);
6975 while (raw_tag != FIFO_EMPTY)
6976 raw_tag = next_command(h, q);
6977 }
6978 return IRQ_HANDLED;
6979}
6980
6981static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6982{
6983 struct ctlr_info *h = queue_to_hba(queue);
6984 u32 raw_tag;
6985 u8 q = *(u8 *) queue;
6986
6987 if (ignore_bogus_interrupt(h))
6988 return IRQ_NONE;
6989
6990 h->last_intr_timestamp = get_jiffies_64();
6991 raw_tag = get_next_completion(h, q);
6992 while (raw_tag != FIFO_EMPTY)
6993 raw_tag = next_command(h, q);
6994 return IRQ_HANDLED;
6995}
6996
6997static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6998{
6999 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7000 u32 raw_tag;
7001 u8 q = *(u8 *) queue;
7002
7003 if (interrupt_not_for_us(h))
7004 return IRQ_NONE;
7005 h->last_intr_timestamp = get_jiffies_64();
7006 while (interrupt_pending(h)) {
7007 raw_tag = get_next_completion(h, q);
7008 while (raw_tag != FIFO_EMPTY) {
7009 process_indexed_cmd(h, raw_tag);
7010 raw_tag = next_command(h, q);
7011 }
7012 }
7013 return IRQ_HANDLED;
7014}
7015
7016static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7017{
7018 struct ctlr_info *h = queue_to_hba(queue);
7019 u32 raw_tag;
7020 u8 q = *(u8 *) queue;
7021
7022 h->last_intr_timestamp = get_jiffies_64();
7023 raw_tag = get_next_completion(h, q);
7024 while (raw_tag != FIFO_EMPTY) {
7025 process_indexed_cmd(h, raw_tag);
7026 raw_tag = next_command(h, q);
7027 }
7028 return IRQ_HANDLED;
7029}
7030
7031
7032
7033
7034
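/*
 * Send a message CDB to the controller via a temporarily ioremapped BAR
 * and a coherent DMA buffer, polling the reply FIFO for completion.
 * Used (e.g. for the no-op message) before the normal command
 * infrastructure and interrupts are set up.
 */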
7035static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7036 unsigned char type)
7037{
7038 struct Command {
7039 struct CommandListHeader CommandHeader;
7040 struct RequestBlock Request;
7041 struct ErrDescriptor ErrorDescriptor;
7042 };
7043 struct Command *cmd;
7044 	static const size_t cmd_sz = sizeof(*cmd) +
7045 		sizeof(struct ErrorInfo);
7046 dma_addr_t paddr64;
7047 __le32 paddr32;
7048 u32 tag;
7049 void __iomem *vaddr;
7050 int i, err;
7051
7052 vaddr = pci_ioremap_bar(pdev, 0);
7053 if (vaddr == NULL)
7054 return -ENOMEM;
7055
7056
7057
7058
7059
7060 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7061 if (err) {
7062 iounmap(vaddr);
7063 return err;
7064 }
7065
7066 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7067 if (cmd == NULL) {
7068 iounmap(vaddr);
7069 return -ENOMEM;
7070 }
7071
7072
7073
7074
7075
7076 paddr32 = cpu_to_le32(paddr64);
7077
7078 cmd->CommandHeader.ReplyQueue = 0;
7079 cmd->CommandHeader.SGList = 0;
7080 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7081 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7082 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7083
7084 cmd->Request.CDBLen = 16;
7085 cmd->Request.type_attr_dir =
7086 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7087 cmd->Request.Timeout = 0;
7088 cmd->Request.CDB[0] = opcode;
7089 cmd->Request.CDB[1] = type;
7090 memset(&cmd->Request.CDB[2], 0, 14);
7091 cmd->ErrorDescriptor.Addr =
7092 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7093 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7094
7095 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7096
7097 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7098 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7099 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7100 break;
7101 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7102 }
7103
7104 iounmap(vaddr);
7105
7106
7107
7108
7109 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7110 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7111 opcode, type);
7112 return -ETIMEDOUT;
7113 }
7114
7115 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7116
7117 if (tag & HPSA_ERROR_BIT) {
7118 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7119 opcode, type);
7120 return -EIO;
7121 }
7122
7123 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7124 opcode, type);
7125 return 0;
7126}
7127
7128#define hpsa_noop(p) hpsa_message(p, 3, 0)
7129
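/*
 * Hard-reset the controller, either by writing the reset code to the
 * doorbell register (when the firmware supports it) or by bouncing the
 * device through PCI power states D3hot -> D0, then give the board time
 * to come back.
 */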
7130static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7131 void __iomem *vaddr, u32 use_doorbell)
7132{
7133
7134 if (use_doorbell) {
7135
7136
7137
7138
7139 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7140 writel(use_doorbell, vaddr + SA5_DOORBELL);
7141
7142
7143
7144
7145
7146
7147 msleep(10000);
7148 } else {
7149
7150
7151
7152
7153
7154
7155
7156
7157
7158 int rc = 0;
7159
7160 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7161
7162
7163 rc = pci_set_power_state(pdev, PCI_D3hot);
7164 if (rc)
7165 return rc;
7166
7167 msleep(500);
7168
7169
7170 rc = pci_set_power_state(pdev, PCI_D0);
7171 if (rc)
7172 return rc;
7173
7174
7175
7176
7177
7178
7179 msleep(500);
7180 }
7181 return 0;
7182}
7183
7184static void init_driver_version(char *driver_version, int len)
7185{
7186 memset(driver_version, 0, len);
7187 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7188}
7189
7190static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7191{
7192 char *driver_version;
7193 int i, size = sizeof(cfgtable->driver_version);
7194
7195 driver_version = kmalloc(size, GFP_KERNEL);
7196 if (!driver_version)
7197 return -ENOMEM;
7198
7199 init_driver_version(driver_version, size);
7200 for (i = 0; i < size; i++)
7201 writeb(driver_version[i], &cfgtable->driver_version[i]);
7202 kfree(driver_version);
7203 return 0;
7204}
7205
7206static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7207 unsigned char *driver_ver)
7208{
7209 int i;
7210
7211 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7212 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7213}
7214
7215static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7216{
7217
7218 char *driver_ver, *old_driver_ver;
7219 int rc, size = sizeof(cfgtable->driver_version);
7220
7221 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7222 if (!old_driver_ver)
7223 return -ENOMEM;
7224 driver_ver = old_driver_ver + size;
7225
7226
7227
7228
7229 init_driver_version(old_driver_ver, size);
7230 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7231 rc = !memcmp(driver_ver, old_driver_ver, size);
7232 kfree(old_driver_ver);
7233 return rc;
7234}
7235
7236
7237
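/*
 * Reset a controller that may still be streaming stale completions after
 * a kdump/kexec.  Save PCI state, map the BAR and config table, record
 * our driver version string in the config table, perform a doorbell (or
 * PCI PM) reset, restore PCI state, wait for the board to become ready,
 * and then check whether the reset actually wiped the version string.
 */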
7238static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7239{
7240 u64 cfg_offset;
7241 u32 cfg_base_addr;
7242 u64 cfg_base_addr_index;
7243 void __iomem *vaddr;
7244 unsigned long paddr;
7245 u32 misc_fw_support;
7246 int rc;
7247 struct CfgTable __iomem *cfgtable;
7248 u32 use_doorbell;
7249 u16 command_register;
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259
7260
7261
7262
7263
7264 if (!ctlr_is_resettable(board_id)) {
7265 dev_warn(&pdev->dev, "Controller not resettable\n");
7266 return -ENODEV;
7267 }
7268
7269
7270 if (!ctlr_is_hard_resettable(board_id))
7271 return -ENOTSUPP;
7272
7273
7274 	pci_read_config_word(pdev, PCI_COMMAND, &command_register);
7275 pci_save_state(pdev);
7276
7277
7278 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7279 if (rc)
7280 return rc;
7281 vaddr = remap_pci_mem(paddr, 0x250);
7282 if (!vaddr)
7283 return -ENOMEM;
7284
7285
7286 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7287 &cfg_base_addr_index, &cfg_offset);
7288 if (rc)
7289 goto unmap_vaddr;
7290 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7291 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7292 if (!cfgtable) {
7293 rc = -ENOMEM;
7294 goto unmap_vaddr;
7295 }
7296 rc = write_driver_ver_to_cfgtable(cfgtable);
7297 if (rc)
7298 goto unmap_cfgtable;
7299
7300
7301
7302
7303 misc_fw_support = readl(&cfgtable->misc_fw_support);
7304 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7305 if (use_doorbell) {
7306 use_doorbell = DOORBELL_CTLR_RESET2;
7307 } else {
7308 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7309 if (use_doorbell) {
7310 dev_warn(&pdev->dev,
7311 "Soft reset not supported. Firmware update is required.\n");
7312 rc = -ENOTSUPP;
7313 goto unmap_cfgtable;
7314 }
7315 }
7316
7317 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7318 if (rc)
7319 goto unmap_cfgtable;
7320
7321 pci_restore_state(pdev);
7322 	pci_write_config_word(pdev, PCI_COMMAND, command_register);
7323
7324
7325
7326 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7327
7328 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7329 if (rc) {
7330 dev_warn(&pdev->dev,
7331 "Failed waiting for board to become ready after hard reset\n");
7332 goto unmap_cfgtable;
7333 }
7334
7335 	rc = controller_reset_failed(cfgtable);
7336 if (rc < 0)
7337 goto unmap_cfgtable;
7338 if (rc) {
7339 dev_warn(&pdev->dev, "Unable to successfully reset "
7340 "controller. Will try soft reset.\n");
7341 rc = -ENOTSUPP;
7342 } else {
7343 dev_info(&pdev->dev, "board ready after hard reset.\n");
7344 }
7345
7346unmap_cfgtable:
7347 iounmap(cfgtable);
7348
7349unmap_vaddr:
7350 iounmap(vaddr);
7351 return rc;
7352}
7353
7354
7355
7356
7357
7358
7359static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7360{
7361#ifdef HPSA_DEBUG
7362 int i;
7363 char temp_name[17];
7364
7365 dev_info(dev, "Controller Configuration information\n");
7366 dev_info(dev, "------------------------------------\n");
7367 for (i = 0; i < 4; i++)
7368 temp_name[i] = readb(&(tb->Signature[i]));
7369 temp_name[4] = '\0';
7370 dev_info(dev, " Signature = %s\n", temp_name);
7371 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7372 dev_info(dev, " Transport methods supported = 0x%x\n",
7373 readl(&(tb->TransportSupport)));
7374 dev_info(dev, " Transport methods active = 0x%x\n",
7375 readl(&(tb->TransportActive)));
7376 dev_info(dev, " Requested transport Method = 0x%x\n",
7377 readl(&(tb->HostWrite.TransportRequest)));
7378 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7379 readl(&(tb->HostWrite.CoalIntDelay)));
7380 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7381 readl(&(tb->HostWrite.CoalIntCount)));
7382 dev_info(dev, " Max outstanding commands = %d\n",
7383 readl(&(tb->CmdsOutMax)));
7384 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7385 for (i = 0; i < 16; i++)
7386 temp_name[i] = readb(&(tb->ServerName[i]));
7387 temp_name[16] = '\0';
7388 dev_info(dev, " Server Name = %s\n", temp_name);
7389 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7390 readl(&(tb->HeartBeat)));
7391#endif
7392}
7393
7394static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7395{
7396 int i, offset, mem_type, bar_type;
7397
7398 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
7399 return 0;
7400 offset = 0;
7401 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7402 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7403 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7404 offset += 4;
7405 else {
7406 mem_type = pci_resource_flags(pdev, i) &
7407 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7408 switch (mem_type) {
7409 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7410 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7411 offset += 4;
7412 break;
7413 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7414 offset += 8;
7415 break;
7416 default:
7417 dev_warn(&pdev->dev,
7418 "base address is invalid\n");
7419 				return -1;
7421 }
7422 }
7423 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7424 return i + 1;
7425 }
7426 return -1;
7427}
7428
7429static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7430{
7431 pci_free_irq_vectors(h->pdev);
7432 h->msix_vectors = 0;
7433}
7434
7435static void hpsa_setup_reply_map(struct ctlr_info *h)
7436{
7437 const struct cpumask *mask;
7438 unsigned int queue, cpu;
7439
7440 for (queue = 0; queue < h->msix_vectors; queue++) {
7441 mask = pci_irq_get_affinity(h->pdev, queue);
7442 if (!mask)
7443 goto fallback;
7444
7445 for_each_cpu(cpu, mask)
7446 h->reply_map[cpu] = queue;
7447 }
7448 return;
7449
7450fallback:
7451 for_each_possible_cpu(cpu)
7452 h->reply_map[cpu] = 0;
7453}
7454
7455
7456
7457
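/*
 * Choose an interrupt mode: try MSI-X with up to MAX_REPLY_QUEUES
 * affinity-spread vectors, then fall back to a single MSI vector, then
 * to legacy INTx.  The board IDs listed below skip MSI-X/MSI entirely
 * and use a single legacy INTx vector.
 */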
7458static int hpsa_interrupt_mode(struct ctlr_info *h)
7459{
7460 unsigned int flags = PCI_IRQ_LEGACY;
7461 int ret;
7462
7463
7464 switch (h->board_id) {
7465 case 0x40700E11:
7466 case 0x40800E11:
7467 case 0x40820E11:
7468 case 0x40830E11:
7469 break;
7470 default:
7471 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7472 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7473 if (ret > 0) {
7474 h->msix_vectors = ret;
7475 return 0;
7476 }
7477
7478 flags |= PCI_IRQ_MSI;
7479 break;
7480 }
7481
7482 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7483 if (ret < 0)
7484 return ret;
7485 return 0;
7486}
7487
7488static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7489 bool *legacy_board)
7490{
7491 int i;
7492 u32 subsystem_vendor_id, subsystem_device_id;
7493
7494 subsystem_vendor_id = pdev->subsystem_vendor;
7495 subsystem_device_id = pdev->subsystem_device;
7496 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7497 subsystem_vendor_id;
7498
7499 if (legacy_board)
7500 *legacy_board = false;
7501 for (i = 0; i < ARRAY_SIZE(products); i++)
7502 if (*board_id == products[i].board_id) {
7503 if (products[i].access != &SA5A_access &&
7504 products[i].access != &SA5B_access)
7505 return i;
7506 dev_warn(&pdev->dev,
7507 "legacy board ID: 0x%08x\n",
7508 *board_id);
7509 if (legacy_board)
7510 *legacy_board = true;
7511 return i;
7512 }
7513
7514 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7515 if (legacy_board)
7516 *legacy_board = true;
7517 return ARRAY_SIZE(products) - 1;
7518}
7519
7520static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7521 unsigned long *memory_bar)
7522{
7523 int i;
7524
7525 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7526 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7527
7528 *memory_bar = pci_resource_start(pdev, i);
7529 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7530 *memory_bar);
7531 return 0;
7532 }
7533 dev_warn(&pdev->dev, "no memory BAR found\n");
7534 return -ENODEV;
7535}
7536
7537static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7538 int wait_for_ready)
7539{
7540 int i, iterations;
7541 u32 scratchpad;
7542 if (wait_for_ready)
7543 iterations = HPSA_BOARD_READY_ITERATIONS;
7544 else
7545 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7546
7547 for (i = 0; i < iterations; i++) {
7548 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7549 if (wait_for_ready) {
7550 if (scratchpad == HPSA_FIRMWARE_READY)
7551 return 0;
7552 } else {
7553 if (scratchpad != HPSA_FIRMWARE_READY)
7554 return 0;
7555 }
7556 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7557 }
7558 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7559 return -ENODEV;
7560}
7561
7562static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7563 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7564 u64 *cfg_offset)
7565{
7566 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7567 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7568 *cfg_base_addr &= (u32) 0x0000ffff;
7569 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7570 if (*cfg_base_addr_index == -1) {
7571 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7572 return -ENODEV;
7573 }
7574 return 0;
7575}
7576
7577static void hpsa_free_cfgtables(struct ctlr_info *h)
7578{
7579 if (h->transtable) {
7580 iounmap(h->transtable);
7581 h->transtable = NULL;
7582 }
7583 if (h->cfgtable) {
7584 iounmap(h->cfgtable);
7585 h->cfgtable = NULL;
7586 }
7587}
7588
7589
7590
7591
7592static int hpsa_find_cfgtables(struct ctlr_info *h)
7593{
7594 u64 cfg_offset;
7595 u32 cfg_base_addr;
7596 u64 cfg_base_addr_index;
7597 u32 trans_offset;
7598 int rc;
7599
7600 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7601 &cfg_base_addr_index, &cfg_offset);
7602 if (rc)
7603 return rc;
7604 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7605 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7606 if (!h->cfgtable) {
7607 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7608 return -ENOMEM;
7609 }
7610 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7611 if (rc)
7612 return rc;
7613
7614 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7615 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7616 cfg_base_addr_index)+cfg_offset+trans_offset,
7617 sizeof(*h->transtable));
7618 if (!h->transtable) {
7619 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7620 hpsa_free_cfgtables(h);
7621 return -ENOMEM;
7622 }
7623 return 0;
7624}
7625
7626static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7627{
7628#define MIN_MAX_COMMANDS 16
7629 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7630
7631 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7632
7633
7634 if (reset_devices && h->max_commands > 32)
7635 h->max_commands = 32;
7636
7637 if (h->max_commands < MIN_MAX_COMMANDS) {
7638 dev_warn(&h->pdev->dev,
7639 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7640 h->max_commands,
7641 MIN_MAX_COMMANDS);
7642 h->max_commands = MIN_MAX_COMMANDS;
7643 }
7644}
7645
7646
7647
7648
7649
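/*
 * Controllers that report more than 512 scatter-gather entries support
 * chained SG blocks; older ones are limited to the entries that fit
 * inline in the command itself.
 */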
7650static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7651{
7652 return h->maxsgentries > 512;
7653}
7654
7655
7656
7657
7658
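/*
 * Read controller limits from the config table: maximum outstanding
 * commands, scatter-gather limits (with or without chained SG blocks),
 * firmware feature bits and task-management (TMF) support flags.
 */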
7659static void hpsa_find_board_params(struct ctlr_info *h)
7660{
7661 hpsa_get_max_perf_mode_cmds(h);
7662 h->nr_cmds = h->max_commands;
7663 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7664 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7665 if (hpsa_supports_chained_sg_blocks(h)) {
7666
7667 h->max_cmd_sg_entries = 32;
7668 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7669 h->maxsgentries--;
7670 } else {
7671
7672
7673
7674
7675
7676 h->max_cmd_sg_entries = 31;
7677 h->maxsgentries = 31;
7678 h->chainsize = 0;
7679 }
7680
7681
7682 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7683 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7684 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7685 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7686 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7687 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7688 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7689}
7690
7691static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7692{
7693 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7694 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7695 return false;
7696 }
7697 return true;
7698}
7699
7700static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7701{
7702 u32 driver_support;
7703
7704 driver_support = readl(&(h->cfgtable->driver_support));
7705
7706#ifdef CONFIG_X86
7707 driver_support |= ENABLE_SCSI_PREFETCH;
7708#endif
7709 driver_support |= ENABLE_UNIT_ATTN;
7710 writel(driver_support, &(h->cfgtable->driver_support));
7711}
7712
7713
7714
7715
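/*
 * P600 DMA prefetch quirk (board id 0x3225103C): set bit 15 (0x8000) of
 * the I2O DMA1 configuration register; other boards are left untouched.
 */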
7716static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7717{
7718 u32 dma_prefetch;
7719
7720 if (h->board_id != 0x3225103C)
7721 return;
7722 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7723 dma_prefetch |= 0x8000;
7724 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7725}
7726
7727static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7728{
7729 int i;
7730 u32 doorbell_value;
7731 unsigned long flags;
7732
7733 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7734 spin_lock_irqsave(&h->lock, flags);
7735 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7736 spin_unlock_irqrestore(&h->lock, flags);
7737 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7738 goto done;
7739
7740 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7741 }
7742 return -ENODEV;
7743done:
7744 return 0;
7745}
7746
7747static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7748{
7749 int i;
7750 u32 doorbell_value;
7751 unsigned long flags;
7752
7753
7754
7755
7756
7757 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7758 if (h->remove_in_progress)
7759 goto done;
7760 spin_lock_irqsave(&h->lock, flags);
7761 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7762 spin_unlock_irqrestore(&h->lock, flags);
7763 if (!(doorbell_value & CFGTBL_ChangeReq))
7764 goto done;
7765
7766 msleep(MODE_CHANGE_WAIT_INTERVAL);
7767 }
7768 return -ENODEV;
7769done:
7770 return 0;
7771}
7772
7773
7774static int hpsa_enter_simple_mode(struct ctlr_info *h)
7775{
7776 u32 trans_support;
7777
7778 trans_support = readl(&(h->cfgtable->TransportSupport));
7779 if (!(trans_support & SIMPLE_MODE))
7780 return -ENOTSUPP;
7781
7782 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7783
7784
7785 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7786 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7787 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7788 if (hpsa_wait_for_mode_change_ack(h))
7789 goto error;
7790 print_cfg_table(&h->pdev->dev, h->cfgtable);
7791 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7792 goto error;
7793 h->transMethod = CFGTBL_Trans_Simple;
7794 return 0;
7795error:
7796 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7797 return -ENODEV;
7798}
7799
7800
7801static void hpsa_free_pci_init(struct ctlr_info *h)
7802{
7803 hpsa_free_cfgtables(h);
7804 iounmap(h->vaddr);
7805 h->vaddr = NULL;
7806 hpsa_disable_interrupt_mode(h);
7807
7808
7809
7810
7811 pci_disable_device(h->pdev);
7812 pci_release_regions(h->pdev);
7813}
7814
7815
7816static int hpsa_pci_init(struct ctlr_info *h)
7817{
7818 int prod_index, err;
7819 bool legacy_board;
7820
7821 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7822 if (prod_index < 0)
7823 return prod_index;
7824 h->product_name = products[prod_index].product_name;
7825 h->access = *(products[prod_index].access);
7826 h->legacy_board = legacy_board;
7827 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7828 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7829
7830 err = pci_enable_device(h->pdev);
7831 if (err) {
7832 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7833 pci_disable_device(h->pdev);
7834 return err;
7835 }
7836
7837 err = pci_request_regions(h->pdev, HPSA);
7838 if (err) {
7839 dev_err(&h->pdev->dev,
7840 "failed to obtain PCI resources\n");
7841 pci_disable_device(h->pdev);
7842 return err;
7843 }
7844
7845 pci_set_master(h->pdev);
7846
7847 err = hpsa_interrupt_mode(h);
7848 if (err)
7849 goto clean1;
7850
7851
7852 hpsa_setup_reply_map(h);
7853
7854 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7855 if (err)
7856 goto clean2;
7857 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7858 if (!h->vaddr) {
7859 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7860 err = -ENOMEM;
7861 goto clean2;
7862 }
7863 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7864 if (err)
7865 goto clean3;
7866 err = hpsa_find_cfgtables(h);
7867 if (err)
7868 goto clean3;
7869 hpsa_find_board_params(h);
7870
7871 if (!hpsa_CISS_signature_present(h)) {
7872 err = -ENODEV;
7873 goto clean4;
7874 }
7875 hpsa_set_driver_support_bits(h);
7876 hpsa_p600_dma_prefetch_quirk(h);
7877 err = hpsa_enter_simple_mode(h);
7878 if (err)
7879 goto clean4;
7880 return 0;
7881
7882clean4:
7883 hpsa_free_cfgtables(h);
7884clean3:
7885 iounmap(h->vaddr);
7886 h->vaddr = NULL;
7887clean2:
7888 hpsa_disable_interrupt_mode(h);
7889clean1:
7890
7891
7892
7893
7894 pci_disable_device(h->pdev);
7895 pci_release_regions(h->pdev);
7896 return err;
7897}
7898
7899static void hpsa_hba_inquiry(struct ctlr_info *h)
7900{
7901 int rc;
7902
7903#define HBA_INQUIRY_BYTE_COUNT 64
7904 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7905 if (!h->hba_inquiry_data)
7906 return;
7907 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7908 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7909 if (rc != 0) {
7910 kfree(h->hba_inquiry_data);
7911 h->hba_inquiry_data = NULL;
7912 }
7913}
7914
7915static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7916{
7917 int rc, i;
7918 void __iomem *vaddr;
7919
7920 if (!reset_devices)
7921 return 0;
7922
7923
7924
7925
7926
7927 rc = pci_enable_device(pdev);
7928 if (rc) {
7929 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7930 return -ENODEV;
7931 }
7932 pci_disable_device(pdev);
7933 msleep(260);
7934 rc = pci_enable_device(pdev);
7935 if (rc) {
7936 dev_warn(&pdev->dev, "failed to enable device.\n");
7937 return -ENODEV;
7938 }
7939
7940 pci_set_master(pdev);
7941
7942 vaddr = pci_ioremap_bar(pdev, 0);
7943 if (vaddr == NULL) {
7944 rc = -ENOMEM;
7945 goto out_disable;
7946 }
7947 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7948 iounmap(vaddr);
7949
7950
7951 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7952
7953
7954
7955
7956
7957
7958 if (rc)
7959 goto out_disable;
7960
7961
7962 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7963 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7964 if (hpsa_noop(pdev) == 0)
7965 break;
7966 else
7967 			dev_warn(&pdev->dev, "no-op failed%s\n",
7968 				i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : "");
7969 }
7970
7971out_disable:
7972
7973 pci_disable_device(pdev);
7974 return rc;
7975}
7976
7977static void hpsa_free_cmd_pool(struct ctlr_info *h)
7978{
7979 kfree(h->cmd_pool_bits);
7980 h->cmd_pool_bits = NULL;
7981 if (h->cmd_pool) {
7982 dma_free_coherent(&h->pdev->dev,
7983 h->nr_cmds * sizeof(struct CommandList),
7984 h->cmd_pool,
7985 h->cmd_pool_dhandle);
7986 h->cmd_pool = NULL;
7987 h->cmd_pool_dhandle = 0;
7988 }
7989 if (h->errinfo_pool) {
7990 dma_free_coherent(&h->pdev->dev,
7991 h->nr_cmds * sizeof(struct ErrorInfo),
7992 h->errinfo_pool,
7993 h->errinfo_pool_dhandle);
7994 h->errinfo_pool = NULL;
7995 h->errinfo_pool_dhandle = 0;
7996 }
7997}
7998
7999static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8000{
8001 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8002 sizeof(unsigned long),
8003 GFP_KERNEL);
8004 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8005 h->nr_cmds * sizeof(*h->cmd_pool),
8006 &h->cmd_pool_dhandle, GFP_KERNEL);
8007 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8008 h->nr_cmds * sizeof(*h->errinfo_pool),
8009 &h->errinfo_pool_dhandle, GFP_KERNEL);
8010 if ((h->cmd_pool_bits == NULL)
8011 || (h->cmd_pool == NULL)
8012 || (h->errinfo_pool == NULL)) {
8013 		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8014 goto clean_up;
8015 }
8016 hpsa_preinitialize_commands(h);
8017 return 0;
8018clean_up:
8019 hpsa_free_cmd_pool(h);
8020 return -ENOMEM;
8021}
8022
8023
8024static void hpsa_free_irqs(struct ctlr_info *h)
8025{
8026 int i;
8027 int irq_vector = 0;
8028
8029 if (hpsa_simple_mode)
8030 irq_vector = h->intr_mode;
8031
8032 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8033
8034 free_irq(pci_irq_vector(h->pdev, irq_vector),
8035 &h->q[h->intr_mode]);
8036 h->q[h->intr_mode] = 0;
8037 return;
8038 }
8039
8040 for (i = 0; i < h->msix_vectors; i++) {
8041 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8042 h->q[i] = 0;
8043 }
8044 for (; i < MAX_REPLY_QUEUES; i++)
8045 h->q[i] = 0;
8046}
8047
8048
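/*
 * Request interrupt handlers: one per MSI-X vector in performant mode,
 * otherwise a single MSI or (shared) INTx handler.  h->q[] holds the
 * reply-queue index that is handed to each handler as its dev_id.
 */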
8049static int hpsa_request_irqs(struct ctlr_info *h,
8050 irqreturn_t (*msixhandler)(int, void *),
8051 irqreturn_t (*intxhandler)(int, void *))
8052{
8053 int rc, i;
8054 int irq_vector = 0;
8055
8056 if (hpsa_simple_mode)
8057 irq_vector = h->intr_mode;
8058
8059
8060
8061
8062
8063 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8064 h->q[i] = (u8) i;
8065
8066 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8067
8068 for (i = 0; i < h->msix_vectors; i++) {
8069 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8070 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8071 0, h->intrname[i],
8072 &h->q[i]);
8073 if (rc) {
8074 int j;
8075
8076 dev_err(&h->pdev->dev,
8077 "failed to get irq %d for %s\n",
8078 pci_irq_vector(h->pdev, i), h->devname);
8079 for (j = 0; j < i; j++) {
8080 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8081 h->q[j] = 0;
8082 }
8083 for (; j < MAX_REPLY_QUEUES; j++)
8084 h->q[j] = 0;
8085 return rc;
8086 }
8087 }
8088 } else {
8089
8090 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8091 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8092 h->msix_vectors ? "x" : "");
8093 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8094 msixhandler, 0,
8095 h->intrname[0],
8096 &h->q[h->intr_mode]);
8097 } else {
8098 sprintf(h->intrname[h->intr_mode],
8099 "%s-intx", h->devname);
8100 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8101 intxhandler, IRQF_SHARED,
8102 h->intrname[0],
8103 &h->q[h->intr_mode]);
8104 }
8105 }
8106 if (rc) {
8107 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8108 pci_irq_vector(h->pdev, irq_vector), h->devname);
8109 hpsa_free_irqs(h);
8110 return -ENODEV;
8111 }
8112 return 0;
8113}
8114
8115static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8116{
8117 int rc;
8118 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8119
8120 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8121 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8122 if (rc) {
8123 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8124 return rc;
8125 }
8126
8127 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8128 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8129 if (rc) {
8130 dev_warn(&h->pdev->dev, "Board failed to become ready "
8131 "after soft reset.\n");
8132 return rc;
8133 }
8134
8135 return 0;
8136}
8137
8138static void hpsa_free_reply_queues(struct ctlr_info *h)
8139{
8140 int i;
8141
8142 for (i = 0; i < h->nreply_queues; i++) {
8143 if (!h->reply_queue[i].head)
8144 continue;
8145 dma_free_coherent(&h->pdev->dev,
8146 h->reply_queue_size,
8147 h->reply_queue[i].head,
8148 h->reply_queue[i].busaddr);
8149 h->reply_queue[i].head = NULL;
8150 h->reply_queue[i].busaddr = 0;
8151 }
8152 h->reply_queue_size = 0;
8153}
8154
8155static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8156{
8157 hpsa_free_performant_mode(h);
8158 hpsa_free_sg_chain_blocks(h);
8159 hpsa_free_cmd_pool(h);
8160 hpsa_free_irqs(h);
8161 scsi_host_put(h->scsi_host);
8162 h->scsi_host = NULL;
8163 hpsa_free_pci_init(h);
8164 free_percpu(h->lockup_detected);
8165 h->lockup_detected = NULL;
8166 if (h->resubmit_wq) {
8167 destroy_workqueue(h->resubmit_wq);
8168 h->resubmit_wq = NULL;
8169 }
8170 if (h->rescan_ctlr_wq) {
8171 destroy_workqueue(h->rescan_ctlr_wq);
8172 h->rescan_ctlr_wq = NULL;
8173 }
8174 if (h->monitor_ctlr_wq) {
8175 destroy_workqueue(h->monitor_ctlr_wq);
8176 h->monitor_ctlr_wq = NULL;
8177 }
8178
8179 kfree(h);
8180}
8181
8182
8183static void fail_all_outstanding_cmds(struct ctlr_info *h)
8184{
8185 int i, refcount;
8186 struct CommandList *c;
8187 int failcount = 0;
8188
8189 flush_workqueue(h->resubmit_wq);
8190 for (i = 0; i < h->nr_cmds; i++) {
8191 c = h->cmd_pool + i;
8192 refcount = atomic_inc_return(&c->refcount);
8193 if (refcount > 1) {
8194 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8195 finish_cmd(c);
8196 atomic_dec(&h->commands_outstanding);
8197 failcount++;
8198 }
8199 cmd_free(h, c);
8200 }
8201 dev_warn(&h->pdev->dev,
8202 "failed %d commands in fail_all\n", failcount);
8203}
8204
8205static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8206{
8207 int cpu;
8208
8209 for_each_online_cpu(cpu) {
8210 u32 *lockup_detected;
8211 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8212 *lockup_detected = value;
8213 }
8214 wmb();
8215}
8216
8217static void controller_lockup_detected(struct ctlr_info *h)
8218{
8219 unsigned long flags;
8220 u32 lockup_detected;
8221
8222 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8223 spin_lock_irqsave(&h->lock, flags);
8224 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8225 if (!lockup_detected) {
8226
8227 dev_warn(&h->pdev->dev,
8228 "lockup detected after %d but scratchpad register is zero\n",
8229 h->heartbeat_sample_interval / HZ);
8230 lockup_detected = 0xffffffff;
8231 }
8232 set_lockup_detected_for_all_cpus(h, lockup_detected);
8233 spin_unlock_irqrestore(&h->lock, flags);
8234 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8235 lockup_detected, h->heartbeat_sample_interval / HZ);
8236 if (lockup_detected == 0xffff0000) {
8237 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8238 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8239 }
8240 pci_disable_device(h->pdev);
8241 fail_all_outstanding_cmds(h);
8242}
8243
8244static int detect_controller_lockup(struct ctlr_info *h)
8245{
8246 u64 now;
8247 u32 heartbeat;
8248 unsigned long flags;
8249
8250 now = get_jiffies_64();
8251
8252 if (time_after64(h->last_intr_timestamp +
8253 (h->heartbeat_sample_interval), now))
8254 return false;
8255
8256
8257
8258
8259
8260
8261 if (time_after64(h->last_heartbeat_timestamp +
8262 (h->heartbeat_sample_interval), now))
8263 return false;
8264
8265
8266 spin_lock_irqsave(&h->lock, flags);
8267 heartbeat = readl(&h->cfgtable->HeartBeat);
8268 spin_unlock_irqrestore(&h->lock, flags);
8269 if (h->last_heartbeat == heartbeat) {
8270 controller_lockup_detected(h);
8271 return true;
8272 }
8273
8274
8275 h->last_heartbeat = heartbeat;
8276 h->last_heartbeat_timestamp = now;
8277 return false;
8278}
8279
8280
8281
8282
8283
8284
8285
8286
8287
8288
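/*
 * Re-read the ioaccel (HP SSD Smart Path) status VPD page for every known
 * device and update offload_config/offload_to_be_enabled; devices that
 * are no longer eligible get offload_enabled cleared.  Called when the
 * controller reports an accelerated I/O path state or config change.
 */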
8289static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8290{
8291 int rc;
8292 int i;
8293 u8 ioaccel_status;
8294 unsigned char *buf;
8295 struct hpsa_scsi_dev_t *device;
8296
8297 if (!h)
8298 return;
8299
8300 buf = kmalloc(64, GFP_KERNEL);
8301 if (!buf)
8302 return;
8303
8304
8305
8306
8307 for (i = 0; i < h->ndevices; i++) {
8308 device = h->dev[i];
8309
8310 if (!device)
8311 continue;
8312 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8313 HPSA_VPD_LV_IOACCEL_STATUS))
8314 continue;
8315
8316 memset(buf, 0, 64);
8317
8318 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8319 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8320 buf, 64);
8321 if (rc != 0)
8322 continue;
8323
8324 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8325 device->offload_config =
8326 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8327 if (device->offload_config)
8328 device->offload_to_be_enabled =
8329 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8330
8331
8332
8333
8334
8335
8336
8337
8338
8339
8340
8341
8342 if (!device->offload_to_be_enabled)
8343 device->offload_enabled = 0;
8344 }
8345
8346 kfree(buf);
8347}
8348
8349static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8350{
8351 char *event_type;
8352
8353 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8354 return;
8355
8356
8357 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8358 | CFGTBL_Trans_io_accel2)) &&
8359 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8360 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8361
8362 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8363 event_type = "state change";
8364 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8365 event_type = "configuration change";
8366
8367 scsi_block_requests(h->scsi_host);
8368 hpsa_set_ioaccel_status(h);
8369 hpsa_drain_accel_commands(h);
8370
8371 dev_warn(&h->pdev->dev,
8372 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8373 h->events, event_type);
8374 writel(h->events, &(h->cfgtable->clear_event_notify));
8375
8376 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8377
8378 hpsa_wait_for_clear_event_notify_ack(h);
8379 scsi_unblock_requests(h->scsi_host);
8380 } else {
8381
8382 writel(h->events, &(h->cfgtable->clear_event_notify));
8383 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8384 hpsa_wait_for_clear_event_notify_ack(h);
8385 }
8386 return;
8387}
8388
8389
8390
8391
8392
8393
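/*
 * Decide whether a rescan is needed: either the driver explicitly asked
 * for one (drv_req_rescan) or the controller posted an event matching
 * RESCAN_REQUIRED_EVENT_BITS in the event-notify config table register.
 */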
8394static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8395{
8396 if (h->drv_req_rescan) {
8397 h->drv_req_rescan = 0;
8398 return 1;
8399 }
8400
8401 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8402 return 0;
8403
8404 h->events = readl(&(h->cfgtable->event_notify));
8405 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8406}
8407
8408
8409
8410
8411static int hpsa_offline_devices_ready(struct ctlr_info *h)
8412{
8413 unsigned long flags;
8414 struct offline_device_entry *d;
8415 struct list_head *this, *tmp;
8416
8417 spin_lock_irqsave(&h->offline_device_lock, flags);
8418 list_for_each_safe(this, tmp, &h->offline_device_list) {
8419 d = list_entry(this, struct offline_device_entry,
8420 offline_list);
8421 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8422 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8423 spin_lock_irqsave(&h->offline_device_lock, flags);
8424 list_del(&d->offline_list);
8425 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8426 return 1;
8427 }
8428 spin_lock_irqsave(&h->offline_device_lock, flags);
8429 }
8430 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8431 return 0;
8432}
8433
8434static int hpsa_luns_changed(struct ctlr_info *h)
8435{
8436 int rc = 1;
8437 struct ReportLUNdata *logdev = NULL;
8438
8439
8440
8441
8442
8443 if (!h->lastlogicals)
8444 return rc;
8445
8446 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8447 if (!logdev)
8448 return rc;
8449
8450 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8451 dev_warn(&h->pdev->dev,
8452 "report luns failed, can't track lun changes.\n");
8453 goto out;
8454 }
8455 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8456 dev_info(&h->pdev->dev,
8457 "Lun changes detected.\n");
8458 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8459 goto out;
8460 } else
8461 rc = 0;
8462out:
8463 kfree(logdev);
8464 return rc;
8465}
8466
8467static void hpsa_perform_rescan(struct ctlr_info *h)
8468{
8469 struct Scsi_Host *sh = NULL;
8470 unsigned long flags;
8471
8472
8473
8474
8475 spin_lock_irqsave(&h->reset_lock, flags);
8476 if (h->reset_in_progress) {
8477 h->drv_req_rescan = 1;
8478 spin_unlock_irqrestore(&h->reset_lock, flags);
8479 return;
8480 }
8481 spin_unlock_irqrestore(&h->reset_lock, flags);
8482
8483 sh = scsi_host_get(h->scsi_host);
8484 if (sh != NULL) {
8485 hpsa_scan_start(sh);
8486 scsi_host_put(sh);
8487 h->drv_req_rescan = 0;
8488 }
8489}
8490
8491
8492
8493
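/*
 * Periodic worker: acknowledge controller events and rescan if needed,
 * then re-arm itself unless the controller is being removed.
 */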
8494static void hpsa_event_monitor_worker(struct work_struct *work)
8495{
8496 struct ctlr_info *h = container_of(to_delayed_work(work),
8497 struct ctlr_info, event_monitor_work);
8498 unsigned long flags;
8499
8500 spin_lock_irqsave(&h->lock, flags);
8501 if (h->remove_in_progress) {
8502 spin_unlock_irqrestore(&h->lock, flags);
8503 return;
8504 }
8505 spin_unlock_irqrestore(&h->lock, flags);
8506
8507 if (hpsa_ctlr_needs_rescan(h)) {
8508 hpsa_ack_ctlr_events(h);
8509 hpsa_perform_rescan(h);
8510 }
8511
8512 spin_lock_irqsave(&h->lock, flags);
8513 if (!h->remove_in_progress)
8514 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8515 HPSA_EVENT_MONITOR_INTERVAL);
8516 spin_unlock_irqrestore(&h->lock, flags);
8517}
8518
8519static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8520{
8521 unsigned long flags;
8522 struct ctlr_info *h = container_of(to_delayed_work(work),
8523 struct ctlr_info, rescan_ctlr_work);
8524
8525 spin_lock_irqsave(&h->lock, flags);
8526 if (h->remove_in_progress) {
8527 spin_unlock_irqrestore(&h->lock, flags);
8528 return;
8529 }
8530 spin_unlock_irqrestore(&h->lock, flags);
8531
8532 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8533 hpsa_perform_rescan(h);
8534 } else if (h->discovery_polling) {
8535 if (hpsa_luns_changed(h)) {
8536 dev_info(&h->pdev->dev,
8537 "driver discovery polling rescan.\n");
8538 hpsa_perform_rescan(h);
8539 }
8540 }
8541 spin_lock_irqsave(&h->lock, flags);
8542 if (!h->remove_in_progress)
8543 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8544 h->heartbeat_sample_interval);
8545 spin_unlock_irqrestore(&h->lock, flags);
8546}
8547
8548static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8549{
8550 unsigned long flags;
8551 struct ctlr_info *h = container_of(to_delayed_work(work),
8552 struct ctlr_info, monitor_ctlr_work);
8553
8554 detect_controller_lockup(h);
8555 if (lockup_detected(h))
8556 return;
8557
8558 spin_lock_irqsave(&h->lock, flags);
8559 if (!h->remove_in_progress)
8560 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8561 h->heartbeat_sample_interval);
8562 spin_unlock_irqrestore(&h->lock, flags);
8563}
8564
8565static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8566 char *name)
8567{
8568 struct workqueue_struct *wq = NULL;
8569
8570 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8571 if (!wq)
8572 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8573
8574 return wq;
8575}
8576
8577static void hpda_free_ctlr_info(struct ctlr_info *h)
8578{
8579 kfree(h->reply_map);
8580 kfree(h);
8581}
8582
8583static struct ctlr_info *hpda_alloc_ctlr_info(void)
8584{
8585 struct ctlr_info *h;
8586
8587 h = kzalloc(sizeof(*h), GFP_KERNEL);
8588 if (!h)
8589 return NULL;
8590
8591 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8592 if (!h->reply_map) {
8593 kfree(h);
8594 return NULL;
8595 }
8596 return h;
8597}
8598
8599static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8600{
8601 int dac, rc;
8602 struct ctlr_info *h;
8603 int try_soft_reset = 0;
8604 unsigned long flags;
8605 u32 board_id;
8606
8607 if (number_of_controllers == 0)
8608 printk(KERN_INFO DRIVER_NAME "\n");
8609
8610 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8611 if (rc < 0) {
8612 dev_warn(&pdev->dev, "Board ID not found\n");
8613 return rc;
8614 }
8615
8616 rc = hpsa_init_reset_devices(pdev, board_id);
8617 if (rc) {
8618 if (rc != -ENOTSUPP)
8619 return rc;
8620
8621
8622
8623
8624
8625 try_soft_reset = 1;
8626 rc = 0;
8627 }
8628
8629reinit_after_soft_reset:
8630
8631
8632
8633
8634
8635 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8636 h = hpda_alloc_ctlr_info();
8637 if (!h) {
8638 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8639 return -ENOMEM;
8640 }
8641
8642 h->pdev = pdev;
8643
8644 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8645 INIT_LIST_HEAD(&h->offline_device_list);
8646 spin_lock_init(&h->lock);
8647 spin_lock_init(&h->offline_device_lock);
8648 spin_lock_init(&h->scan_lock);
8649 spin_lock_init(&h->reset_lock);
8650 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8651
8652
8653 h->lockup_detected = alloc_percpu(u32);
8654 if (!h->lockup_detected) {
8655 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8656 rc = -ENOMEM;
8657 goto clean1;
8658 }
8659 set_lockup_detected_for_all_cpus(h, 0);
8660
8661 rc = hpsa_pci_init(h);
8662 if (rc)
8663 goto clean2;
8664
8665
8666
8667 rc = hpsa_scsi_host_alloc(h);
8668 if (rc)
8669 goto clean2_5;
8670
8671 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8672 h->ctlr = number_of_controllers;
8673 number_of_controllers++;
8674
8675
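	/* Configure DMA addressing: prefer a 64-bit mask, fall back to 32-bit. */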
8676 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8677 if (rc == 0) {
8678 dac = 1;
8679 } else {
8680 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8681 if (rc == 0) {
8682 dac = 0;
8683 } else {
8684 dev_err(&pdev->dev, "no suitable DMA available\n");
8685 goto clean3;
8686 }
8687 }
8688
8689
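	/* Keep board interrupts masked until IRQs and the command pool are ready. */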
8690 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8691
8692 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8693 if (rc)
8694 goto clean3;
8695 rc = hpsa_alloc_cmd_pool(h);
8696 if (rc)
8697 goto clean4;
8698 rc = hpsa_alloc_sg_chain_blocks(h);
8699 if (rc)
8700 goto clean5;
8701 init_waitqueue_head(&h->scan_wait_queue);
8702 init_waitqueue_head(&h->event_sync_wait_queue);
8703 mutex_init(&h->reset_mutex);
8704 h->scan_finished = 1;
8705 h->scan_waiting = 0;
8706
8707 pci_set_drvdata(pdev, h);
8708 h->ndevices = 0;
8709
8710 spin_lock_init(&h->devlock);
8711 rc = hpsa_put_ctlr_into_performant_mode(h);
8712 if (rc)
8713 goto clean6;
8714
8715
8716 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8717 if (!h->rescan_ctlr_wq) {
8718 rc = -ENOMEM;
8719 goto clean7;
8720 }
8721
8722 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8723 if (!h->resubmit_wq) {
8724 rc = -ENOMEM;
8725 goto clean7;
8726 }
8727
8728 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8729 if (!h->monitor_ctlr_wq) {
8730 rc = -ENOMEM;
8731 goto clean7;
8732 }
8733
8734
8735
8736
8737
8738
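	/*
	 * The controller is now able to accept commands.  If the earlier
	 * hard reset was not supported, attempt a soft reset here and then
	 * reinitialize from scratch.
	 */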
8739 if (try_soft_reset) {
8740
8741
8742
8743
8744
8745
8746
8747
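		/*
		 * The soft reset can still post stale completions, so install
		 * IRQ handlers that discard completions, perform the reset,
		 * and drain for a while with interrupts enabled before
		 * tearing everything down and reinitializing.
		 */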
8748 spin_lock_irqsave(&h->lock, flags);
8749 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8750 spin_unlock_irqrestore(&h->lock, flags);
8751 hpsa_free_irqs(h);
8752 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8753 hpsa_intx_discard_completions);
8754 if (rc) {
8755 dev_warn(&h->pdev->dev,
8756 "Failed to request_irq after soft reset.\n");
8757
8758
8759
8760
8761 hpsa_free_performant_mode(h);
8762 hpsa_free_sg_chain_blocks(h);
8763 hpsa_free_cmd_pool(h);
8764
8765
8766
8767
8768 goto clean3;
8769 }
8770
8771 rc = hpsa_kdump_soft_reset(h);
8772 if (rc)
8773
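			/* The soft reset failed as well; bail out and clean up. */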
8774 goto clean7;
8775
8776 dev_info(&h->pdev->dev, "Board READY.\n");
8777 dev_info(&h->pdev->dev,
8778 "Waiting for stale completions to drain.\n");
8779 h->access.set_intr_mask(h, HPSA_INTR_ON);
8780 msleep(10000);
8781 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8782
8783 rc = controller_reset_failed(h->cfgtable);
8784 if (rc)
8785 dev_info(&h->pdev->dev,
8786 "Soft reset appears to have failed.\n");
8787
8788
8789
8790
8791
8792 hpsa_undo_allocations_after_kdump_soft_reset(h);
8793 try_soft_reset = 0;
8794 if (rc)
8795
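			/*
			 * Don't jump to a clean label here; the kdump undo
			 * path above already freed those allocations.
			 */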
8796 return -ENODEV;
8797
8798 goto reinit_after_soft_reset;
8799 }
8800
8801
8802 h->acciopath_status = 1;
8803
8804 h->discovery_polling = 0;
8805
8806
8807
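	/* Turn controller interrupts on so it can begin servicing requests. */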
8808 h->access.set_intr_mask(h, HPSA_INTR_ON);
8809
8810 hpsa_hba_inquiry(h);
8811
8812 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8813 if (!h->lastlogicals)
8814 dev_info(&h->pdev->dev,
8815 "Can't track change to report lun data\n");
8816
8817
8818 rc = hpsa_scsi_add_host(h);
8819 if (rc)
8820 goto clean7;
8821
8822
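	/*
	 * Kick off the periodic workers: controller lockup monitoring,
	 * rescans, and event monitoring.
	 */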
8823 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8824 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8825 schedule_delayed_work(&h->monitor_ctlr_work,
8826 h->heartbeat_sample_interval);
8827 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8828 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8829 h->heartbeat_sample_interval);
8830 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8831 schedule_delayed_work(&h->event_monitor_work,
8832 HPSA_EVENT_MONITOR_INTERVAL);
8833 return 0;
8834
8835clean7:
8836 hpsa_free_performant_mode(h);
8837 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8838clean6:
8839 hpsa_free_sg_chain_blocks(h);
8840clean5:
8841 hpsa_free_cmd_pool(h);
8842clean4:
8843 hpsa_free_irqs(h);
8844clean3:
8845 scsi_host_put(h->scsi_host);
8846 h->scsi_host = NULL;
8847clean2_5:
8848 hpsa_free_pci_init(h);
8849clean2:
8850 if (h->lockup_detected) {
8851 free_percpu(h->lockup_detected);
8852 h->lockup_detected = NULL;
8853 }
8854clean1:
8855 if (h->resubmit_wq) {
8856 destroy_workqueue(h->resubmit_wq);
8857 h->resubmit_wq = NULL;
8858 }
8859 if (h->rescan_ctlr_wq) {
8860 destroy_workqueue(h->rescan_ctlr_wq);
8861 h->rescan_ctlr_wq = NULL;
8862 }
8863 if (h->monitor_ctlr_wq) {
8864 destroy_workqueue(h->monitor_ctlr_wq);
8865 h->monitor_ctlr_wq = NULL;
8866 }
8867	hpda_free_ctlr_info(h);	/* also frees h->reply_map */
8868 return rc;
8869}
8870
8871static void hpsa_flush_cache(struct ctlr_info *h)
8872{
8873 char *flush_buf;
8874 struct CommandList *c;
8875 int rc;
8876
8877 if (unlikely(lockup_detected(h)))
8878 return;
8879 flush_buf = kzalloc(4, GFP_KERNEL);
8880 if (!flush_buf)
8881 return;
8882
8883 c = cmd_alloc(h);
8884
8885 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8886 RAID_CTLR_LUNID, TYPE_CMD)) {
8887 goto out;
8888 }
8889 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8890 DEFAULT_TIMEOUT);
8891 if (rc)
8892 goto out;
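	/*
	 * Note: the "out" label sits inside the if below, so the same warning
	 * is emitted on every failure path (fill_cmd error, command error, or
	 * a non-zero completion status).
	 */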
8893 if (c->err_info->CommandStatus != 0)
8894out:
8895 dev_warn(&h->pdev->dev,
8896 "error flushing cache on controller\n");
8897 cmd_free(h, c);
8898 kfree(flush_buf);
8899}
8900
8901
8902
8903
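/*
 * Disable the controller's caching of report-LUN data: read the BMIC diag
 * options, set HPSA_DIAG_OPTS_DISABLE_RLD_CACHING, write them back, and
 * re-read to verify the bit stuck.
 */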
8904static void hpsa_disable_rld_caching(struct ctlr_info *h)
8905{
8906 u32 *options;
8907 struct CommandList *c;
8908 int rc;
8909
8910
8911	if (unlikely(lockup_detected(h)))
8912 return;
8913
8914 options = kzalloc(sizeof(*options), GFP_KERNEL);
8915 if (!options)
8916 return;
8917
8918 c = cmd_alloc(h);
8919
8920
8921 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8922 RAID_CTLR_LUNID, TYPE_CMD))
8923 goto errout;
8924
8925 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8926 NO_TIMEOUT);
8927 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8928 goto errout;
8929
8930
8931 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8932
8933 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8934 RAID_CTLR_LUNID, TYPE_CMD))
8935 goto errout;
8936
8937 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8938 NO_TIMEOUT);
8939 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8940 goto errout;
8941
8942
8943 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8944 RAID_CTLR_LUNID, TYPE_CMD))
8945 goto errout;
8946
8947 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8948 NO_TIMEOUT);
8949 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8950 goto errout;
8951
8952 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8953 goto out;
8954
8955errout:
8956 dev_err(&h->pdev->dev,
8957 "Error: failed to disable report lun data caching.\n");
8958out:
8959 cmd_free(h, c);
8960 kfree(options);
8961}
8962
8963static void __hpsa_shutdown(struct pci_dev *pdev)
8964{
8965 struct ctlr_info *h;
8966
8967 h = pci_get_drvdata(pdev);
8968
8969
8970
8971
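	/* Flush the write cache and quiesce interrupts before the device goes away. */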
8972 hpsa_flush_cache(h);
8973 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8974 hpsa_free_irqs(h);
8975 hpsa_disable_interrupt_mode(h);
8976}
8977
8978static void hpsa_shutdown(struct pci_dev *pdev)
8979{
8980 __hpsa_shutdown(pdev);
8981 pci_disable_device(pdev);
8982}
8983
8984static void hpsa_free_device_info(struct ctlr_info *h)
8985{
8986 int i;
8987
8988 for (i = 0; i < h->ndevices; i++) {
8989 kfree(h->dev[i]);
8990 h->dev[i] = NULL;
8991 }
8992}
8993
8994static void hpsa_remove_one(struct pci_dev *pdev)
8995{
8996 struct ctlr_info *h;
8997 unsigned long flags;
8998
8999 if (pci_get_drvdata(pdev) == NULL) {
9000 dev_err(&pdev->dev, "unable to remove device\n");
9001 return;
9002 }
9003 h = pci_get_drvdata(pdev);
9004
9005
9006 spin_lock_irqsave(&h->lock, flags);
9007 h->remove_in_progress = 1;
9008 spin_unlock_irqrestore(&h->lock, flags);
9009 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9010 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9011 cancel_delayed_work_sync(&h->event_monitor_work);
9012 destroy_workqueue(h->rescan_ctlr_wq);
9013 destroy_workqueue(h->resubmit_wq);
9014 destroy_workqueue(h->monitor_ctlr_wq);
9015
9016 hpsa_delete_sas_host(h);
9017
9018
9019
9020
9021
9022
9023
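	/*
	 * Remove the SCSI host from the mid layer before freeing driver
	 * resources so that no new requests arrive during teardown.
	 */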
9024 if (h->scsi_host)
9025 scsi_remove_host(h->scsi_host);
9026
9027
9028 __hpsa_shutdown(pdev);
9029
9030 hpsa_free_device_info(h);
9031
9032 kfree(h->hba_inquiry_data);
9033 h->hba_inquiry_data = NULL;
9034 hpsa_free_ioaccel2_sg_chain_blocks(h);
9035 hpsa_free_performant_mode(h);
9036 hpsa_free_sg_chain_blocks(h);
9037 hpsa_free_cmd_pool(h);
9038 kfree(h->lastlogicals);
9039
9040
9041
9042 scsi_host_put(h->scsi_host);
9043 h->scsi_host = NULL;
9044
9045
9046 hpsa_free_pci_init(h);
9047
9048 free_percpu(h->lockup_detected);
9049 h->lockup_detected = NULL;
9050
9051
9052 hpda_free_ctlr_info(h);
9053}
9054
9055static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9056 __attribute__((unused)) pm_message_t state)
9057{
9058 return -ENOSYS;
9059}
9060
9061static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9062{
9063 return -ENOSYS;
9064}
9065
9066static struct pci_driver hpsa_pci_driver = {
9067 .name = HPSA,
9068 .probe = hpsa_init_one,
9069 .remove = hpsa_remove_one,
9070 .id_table = hpsa_pci_device_id,
9071 .shutdown = hpsa_shutdown,
9072 .suspend = hpsa_suspend,
9073 .resume = hpsa_resume,
9074};
9075
9076
9077
9078
9079
9080
9081
9082
9083
9084
9085
9086
9087
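/*
 * For every possible SG count 0..nsgs, record the index of the first bucket[]
 * entry that can hold (count + min_blocks), or num_buckets if none is large
 * enough.  The resulting map is used to program the controller's block fetch
 * table.
 */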
9088static void calc_bucket_map(int bucket[], int num_buckets,
9089 int nsgs, int min_blocks, u32 *bucket_map)
9090{
9091 int i, j, b, size;
9092
9093
9094 for (i = 0; i <= nsgs; i++) {
9095
9096 size = i + min_blocks;
9097 b = num_buckets;
9098
9099 for (j = 0; j < num_buckets; j++) {
9100 if (bucket[j] >= size) {
9101 b = j;
9102 break;
9103 }
9104 }
9105
9106 bucket_map[i] = b;
9107 }
9108}
9109
9110
9111
9112
9113
9114static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9115{
9116 int i;
9117 unsigned long register_value;
9118 unsigned long transMethod = CFGTBL_Trans_Performant |
9119 (trans_support & CFGTBL_Trans_use_short_tags) |
9120 CFGTBL_Trans_enable_directed_msix |
9121 (trans_support & (CFGTBL_Trans_io_accel1 |
9122 CFGTBL_Trans_io_accel2));
9123 struct access_method access = SA5_performant_access;
9124
9125
9126
9127
9128
9129
9130
9131
9132
9133
9134
9135
9136
9137
9138
9139
9140
9141
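	/*
	 * bft[] and bft2[] hold the candidate block-fetch sizes that get
	 * written to the transport table below; calc_bucket_map() maps each
	 * possible SG count onto one of these buckets.
	 */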
9142 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9143#define MIN_IOACCEL2_BFT_ENTRY 5
9144#define HPSA_IOACCEL2_HEADER_SZ 4
9145 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9146 13, 14, 15, 16, 17, 18, 19,
9147 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9148 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9149 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9150 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9151 16 * MIN_IOACCEL2_BFT_ENTRY);
9152 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9153 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9154
9155
9156
9157
9158
9159
9160
9161
9162
9163
9164 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9165 access = SA5_performant_access_no_read;
9166
9167
9168 for (i = 0; i < h->nreply_queues; i++)
9169 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9170
9171 bft[7] = SG_ENTRIES_IN_CMD + 4;
9172 calc_bucket_map(bft, ARRAY_SIZE(bft),
9173 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9174 for (i = 0; i < 8; i++)
9175 writel(bft[i], &h->transtable->BlockFetch[i]);
9176
9177
9178 writel(h->max_commands, &h->transtable->RepQSize);
9179 writel(h->nreply_queues, &h->transtable->RepQCount);
9180 writel(0, &h->transtable->RepQCtrAddrLow32);
9181 writel(0, &h->transtable->RepQCtrAddrHigh32);
9182
9183 for (i = 0; i < h->nreply_queues; i++) {
9184 writel(0, &h->transtable->RepQAddr[i].upper);
9185 writel(h->reply_queue[i].busaddr,
9186 &h->transtable->RepQAddr[i].lower);
9187 }
9188
9189 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9190 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9191
9192
9193
9194 if (trans_support & CFGTBL_Trans_io_accel1) {
9195 access = SA5_ioaccel_mode1_access;
9196 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9197 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9198 } else
9199 if (trans_support & CFGTBL_Trans_io_accel2)
9200 access = SA5_ioaccel_mode2_access;
9201 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9202 if (hpsa_wait_for_mode_change_ack(h)) {
9203 dev_err(&h->pdev->dev,
9204 "performant mode problem - doorbell timeout\n");
9205 return -ENODEV;
9206 }
9207 register_value = readl(&(h->cfgtable->TransportActive));
9208 if (!(register_value & CFGTBL_Trans_Performant)) {
9209 dev_err(&h->pdev->dev,
9210 "performant mode problem - transport not active\n");
9211 return -ENODEV;
9212 }
9213
9214 h->access = access;
9215 h->transMethod = transMethod;
9216
9217 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9218 (trans_support & CFGTBL_Trans_io_accel2)))
9219 return 0;
9220
9221 if (trans_support & CFGTBL_Trans_io_accel1) {
9222
9223 for (i = 0; i < h->nreply_queues; i++) {
9224 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9225 h->reply_queue[i].current_entry =
9226 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9227 }
9228 bft[7] = h->ioaccel_maxsg + 8;
9229 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9230 h->ioaccel1_blockFetchTable);
9231
9232
9233 for (i = 0; i < h->nreply_queues; i++)
9234 memset(h->reply_queue[i].head,
9235 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9236 h->reply_queue_size);
9237
9238
9239
9240
9241 for (i = 0; i < h->nr_cmds; i++) {
9242 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9243
9244 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9245 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9246 (i * sizeof(struct ErrorInfo)));
9247 cp->err_info_len = sizeof(struct ErrorInfo);
9248 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9249 cp->host_context_flags =
9250 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9251 cp->timeout_sec = 0;
9252 cp->ReplyQueue = 0;
9253 cp->tag =
9254 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9255 cp->host_addr =
9256 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9257 (i * sizeof(struct io_accel1_cmd)));
9258 }
9259 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9260 u64 cfg_offset, cfg_base_addr_index;
9261 u32 bft2_offset, cfg_base_addr;
9262 int rc;
9263
9264 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9265 &cfg_base_addr_index, &cfg_offset);
9266 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9267 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9268 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9269 4, h->ioaccel2_blockFetchTable);
9270 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9271 BUILD_BUG_ON(offsetof(struct CfgTable,
9272 io_accel_request_size_offset) != 0xb8);
9273 h->ioaccel2_bft2_regs =
9274 remap_pci_mem(pci_resource_start(h->pdev,
9275 cfg_base_addr_index) +
9276 cfg_offset + bft2_offset,
9277 ARRAY_SIZE(bft2) *
9278 sizeof(*h->ioaccel2_bft2_regs));
9279 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9280 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9281 }
9282 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9283 if (hpsa_wait_for_mode_change_ack(h)) {
9284 dev_err(&h->pdev->dev,
9285 "performant mode problem - enabling ioaccel mode\n");
9286 return -ENODEV;
9287 }
9288 return 0;
9289}
9290
9291
9292static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9293{
9294 if (h->ioaccel_cmd_pool) {
9295		dma_free_coherent(&h->pdev->dev,
9296			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9297			h->ioaccel_cmd_pool,
9298			h->ioaccel_cmd_pool_dhandle);
9299 h->ioaccel_cmd_pool = NULL;
9300 h->ioaccel_cmd_pool_dhandle = 0;
9301 }
9302 kfree(h->ioaccel1_blockFetchTable);
9303 h->ioaccel1_blockFetchTable = NULL;
9304}
9305
9306
9307static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9308{
9309 h->ioaccel_maxsg =
9310 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9311 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9312 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9313
9314
9315
9316
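	/*
	 * The BUILD_BUG_ON below keeps the command size a multiple of
	 * IOACCEL1_COMMANDLIST_ALIGNMENT, so every element of the coherent
	 * pool retains the alignment of the pool base.
	 */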
9317
9318 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9319 IOACCEL1_COMMANDLIST_ALIGNMENT);
9320 h->ioaccel_cmd_pool =
9321 dma_alloc_coherent(&h->pdev->dev,
9322 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9323 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9324
9325 h->ioaccel1_blockFetchTable =
9326 kmalloc(((h->ioaccel_maxsg + 1) *
9327 sizeof(u32)), GFP_KERNEL);
9328
9329 if ((h->ioaccel_cmd_pool == NULL) ||
9330 (h->ioaccel1_blockFetchTable == NULL))
9331 goto clean_up;
9332
9333 memset(h->ioaccel_cmd_pool, 0,
9334 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9335 return 0;
9336
9337clean_up:
9338 hpsa_free_ioaccel1_cmd_and_bft(h);
9339 return -ENOMEM;
9340}
9341
9342
9343static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9344{
9345 hpsa_free_ioaccel2_sg_chain_blocks(h);
9346
9347 if (h->ioaccel2_cmd_pool) {
9348		dma_free_coherent(&h->pdev->dev,
9349			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9350			h->ioaccel2_cmd_pool,
9351			h->ioaccel2_cmd_pool_dhandle);
9352 h->ioaccel2_cmd_pool = NULL;
9353 h->ioaccel2_cmd_pool_dhandle = 0;
9354 }
9355 kfree(h->ioaccel2_blockFetchTable);
9356 h->ioaccel2_blockFetchTable = NULL;
9357}
9358
9359
9360static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9361{
9362 int rc;
9363
9364
9365
9366 h->ioaccel_maxsg =
9367 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9368 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9369 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9370
9371 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9372 IOACCEL2_COMMANDLIST_ALIGNMENT);
9373 h->ioaccel2_cmd_pool =
9374 dma_alloc_coherent(&h->pdev->dev,
9375 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9376 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9377
9378 h->ioaccel2_blockFetchTable =
9379 kmalloc(((h->ioaccel_maxsg + 1) *
9380 sizeof(u32)), GFP_KERNEL);
9381
9382 if ((h->ioaccel2_cmd_pool == NULL) ||
9383 (h->ioaccel2_blockFetchTable == NULL)) {
9384 rc = -ENOMEM;
9385 goto clean_up;
9386 }
9387
9388 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9389 if (rc)
9390 goto clean_up;
9391
9392 memset(h->ioaccel2_cmd_pool, 0,
9393 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9394 return 0;
9395
9396clean_up:
9397 hpsa_free_ioaccel2_cmd_and_bft(h);
9398 return rc;
9399}
9400
9401
9402static void hpsa_free_performant_mode(struct ctlr_info *h)
9403{
9404 kfree(h->blockFetchTable);
9405 h->blockFetchTable = NULL;
9406 hpsa_free_reply_queues(h);
9407 hpsa_free_ioaccel1_cmd_and_bft(h);
9408 hpsa_free_ioaccel2_cmd_and_bft(h);
9409}
9410
9411
9412
9413
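/*
 * Check what the controller supports, allocate the ioaccel command pools if
 * available, allocate the reply queues and block fetch table, and then switch
 * the transport method via hpsa_enter_performant_mode().
 */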
9414static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9415{
9416 u32 trans_support;
9417 unsigned long transMethod = CFGTBL_Trans_Performant |
9418 CFGTBL_Trans_use_short_tags;
9419 int i, rc;
9420
9421 if (hpsa_simple_mode)
9422 return 0;
9423
9424 trans_support = readl(&(h->cfgtable->TransportSupport));
9425 if (!(trans_support & PERFORMANT_MODE))
9426 return 0;
9427
9428
9429 if (trans_support & CFGTBL_Trans_io_accel1) {
9430 transMethod |= CFGTBL_Trans_io_accel1 |
9431 CFGTBL_Trans_enable_directed_msix;
9432 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9433 if (rc)
9434 return rc;
9435 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9436 transMethod |= CFGTBL_Trans_io_accel2 |
9437 CFGTBL_Trans_enable_directed_msix;
9438 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9439 if (rc)
9440 return rc;
9441 }
9442
9443 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9444 hpsa_get_max_perf_mode_cmds(h);
9445
9446 h->reply_queue_size = h->max_commands * sizeof(u64);
9447
9448 for (i = 0; i < h->nreply_queues; i++) {
9449 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9450 h->reply_queue_size,
9451 &h->reply_queue[i].busaddr,
9452 GFP_KERNEL);
9453 if (!h->reply_queue[i].head) {
9454 rc = -ENOMEM;
9455 goto clean1;
9456 }
9457 h->reply_queue[i].size = h->max_commands;
9458 h->reply_queue[i].wraparound = 1;
9459 h->reply_queue[i].current_entry = 0;
9460 }
9461
9462
9463 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9464 sizeof(u32)), GFP_KERNEL);
9465 if (!h->blockFetchTable) {
9466 rc = -ENOMEM;
9467 goto clean1;
9468 }
9469
9470 rc = hpsa_enter_performant_mode(h, trans_support);
9471 if (rc)
9472 goto clean2;
9473 return 0;
9474
9475clean2:
9476 kfree(h->blockFetchTable);
9477 h->blockFetchTable = NULL;
9478clean1:
9479 hpsa_free_reply_queues(h);
9480 hpsa_free_ioaccel1_cmd_and_bft(h);
9481 hpsa_free_ioaccel2_cmd_and_bft(h);
9482 return rc;
9483}
9484
9485static int is_accelerated_cmd(struct CommandList *c)
9486{
9487 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9488}
9489
9490static void hpsa_drain_accel_commands(struct ctlr_info *h)
9491{
9492 struct CommandList *c = NULL;
9493 int i, accel_cmds_out;
9494 int refcount;
9495
9496 do {
9497 accel_cmds_out = 0;
9498 for (i = 0; i < h->nr_cmds; i++) {
9499 c = h->cmd_pool + i;
9500 refcount = atomic_inc_return(&c->refcount);
9501 if (refcount > 1)
9502 accel_cmds_out += is_accelerated_cmd(c);
9503 cmd_free(h, c);
9504 }
9505 if (accel_cmds_out <= 0)
9506 break;
9507 msleep(100);
9508 } while (1);
9509}
9510
9511static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9512 struct hpsa_sas_port *hpsa_sas_port)
9513{
9514 struct hpsa_sas_phy *hpsa_sas_phy;
9515 struct sas_phy *phy;
9516
9517 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9518 if (!hpsa_sas_phy)
9519 return NULL;
9520
9521 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9522 hpsa_sas_port->next_phy_index);
9523 if (!phy) {
9524 kfree(hpsa_sas_phy);
9525 return NULL;
9526 }
9527
9528 hpsa_sas_port->next_phy_index++;
9529 hpsa_sas_phy->phy = phy;
9530 hpsa_sas_phy->parent_port = hpsa_sas_port;
9531
9532 return hpsa_sas_phy;
9533}
9534
9535static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9536{
9537 struct sas_phy *phy = hpsa_sas_phy->phy;
9538
9539 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9540 if (hpsa_sas_phy->added_to_port)
9541 list_del(&hpsa_sas_phy->phy_list_entry);
9542 sas_phy_delete(phy);
9543 kfree(hpsa_sas_phy);
9544}
9545
9546static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9547{
9548 int rc;
9549 struct hpsa_sas_port *hpsa_sas_port;
9550 struct sas_phy *phy;
9551 struct sas_identify *identify;
9552
9553 hpsa_sas_port = hpsa_sas_phy->parent_port;
9554 phy = hpsa_sas_phy->phy;
9555
9556 identify = &phy->identify;
9557 memset(identify, 0, sizeof(*identify));
9558 identify->sas_address = hpsa_sas_port->sas_address;
9559 identify->device_type = SAS_END_DEVICE;
9560 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9561 identify->target_port_protocols = SAS_PROTOCOL_STP;
9562 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9563 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9564 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9565 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9566 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9567
9568 rc = sas_phy_add(hpsa_sas_phy->phy);
9569 if (rc)
9570 return rc;
9571
9572 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9573 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9574 &hpsa_sas_port->phy_list_head);
9575 hpsa_sas_phy->added_to_port = true;
9576
9577 return 0;
9578}
9579
9580static int
9581 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9582 struct sas_rphy *rphy)
9583{
9584 struct sas_identify *identify;
9585
9586 identify = &rphy->identify;
9587 identify->sas_address = hpsa_sas_port->sas_address;
9588 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9589 identify->target_port_protocols = SAS_PROTOCOL_STP;
9590
9591 return sas_rphy_add(rphy);
9592}
9593
9594static struct hpsa_sas_port
9595 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9596 u64 sas_address)
9597{
9598 int rc;
9599 struct hpsa_sas_port *hpsa_sas_port;
9600 struct sas_port *port;
9601
9602 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9603 if (!hpsa_sas_port)
9604 return NULL;
9605
9606 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9607 hpsa_sas_port->parent_node = hpsa_sas_node;
9608
9609 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9610 if (!port)
9611 goto free_hpsa_port;
9612
9613 rc = sas_port_add(port);
9614 if (rc)
9615 goto free_sas_port;
9616
9617 hpsa_sas_port->port = port;
9618 hpsa_sas_port->sas_address = sas_address;
9619 list_add_tail(&hpsa_sas_port->port_list_entry,
9620 &hpsa_sas_node->port_list_head);
9621
9622 return hpsa_sas_port;
9623
9624free_sas_port:
9625 sas_port_free(port);
9626free_hpsa_port:
9627 kfree(hpsa_sas_port);
9628
9629 return NULL;
9630}
9631
9632static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9633{
9634 struct hpsa_sas_phy *hpsa_sas_phy;
9635 struct hpsa_sas_phy *next;
9636
9637 list_for_each_entry_safe(hpsa_sas_phy, next,
9638 &hpsa_sas_port->phy_list_head, phy_list_entry)
9639 hpsa_free_sas_phy(hpsa_sas_phy);
9640
9641 sas_port_delete(hpsa_sas_port->port);
9642 list_del(&hpsa_sas_port->port_list_entry);
9643 kfree(hpsa_sas_port);
9644}
9645
9646static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9647{
9648 struct hpsa_sas_node *hpsa_sas_node;
9649
9650 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9651 if (hpsa_sas_node) {
9652 hpsa_sas_node->parent_dev = parent_dev;
9653 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9654 }
9655
9656 return hpsa_sas_node;
9657}
9658
9659static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9660{
9661 struct hpsa_sas_port *hpsa_sas_port;
9662 struct hpsa_sas_port *next;
9663
9664 if (!hpsa_sas_node)
9665 return;
9666
9667 list_for_each_entry_safe(hpsa_sas_port, next,
9668 &hpsa_sas_node->port_list_head, port_list_entry)
9669 hpsa_free_sas_port(hpsa_sas_port);
9670
9671 kfree(hpsa_sas_node);
9672}
9673
9674static struct hpsa_scsi_dev_t
9675 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9676 struct sas_rphy *rphy)
9677{
9678 int i;
9679 struct hpsa_scsi_dev_t *device;
9680
9681 for (i = 0; i < h->ndevices; i++) {
9682 device = h->dev[i];
9683 if (!device->sas_port)
9684 continue;
9685 if (device->sas_port->rphy == rphy)
9686 return device;
9687 }
9688
9689 return NULL;
9690}
9691
9692static int hpsa_add_sas_host(struct ctlr_info *h)
9693{
9694 int rc;
9695 struct device *parent_dev;
9696 struct hpsa_sas_node *hpsa_sas_node;
9697 struct hpsa_sas_port *hpsa_sas_port;
9698 struct hpsa_sas_phy *hpsa_sas_phy;
9699
9700 parent_dev = &h->scsi_host->shost_dev;
9701
9702 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9703 if (!hpsa_sas_node)
9704 return -ENOMEM;
9705
9706 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9707 if (!hpsa_sas_port) {
9708 rc = -ENODEV;
9709 goto free_sas_node;
9710 }
9711
9712 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9713 if (!hpsa_sas_phy) {
9714 rc = -ENODEV;
9715 goto free_sas_port;
9716 }
9717
9718 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9719 if (rc)
9720 goto free_sas_phy;
9721
9722 h->sas_host = hpsa_sas_node;
9723
9724 return 0;
9725
9726free_sas_phy:
9727 hpsa_free_sas_phy(hpsa_sas_phy);
9728free_sas_port:
9729 hpsa_free_sas_port(hpsa_sas_port);
9730free_sas_node:
9731 hpsa_free_sas_node(hpsa_sas_node);
9732
9733 return rc;
9734}
9735
9736static void hpsa_delete_sas_host(struct ctlr_info *h)
9737{
9738 hpsa_free_sas_node(h->sas_host);
9739}
9740
9741static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9742 struct hpsa_scsi_dev_t *device)
9743{
9744 int rc;
9745 struct hpsa_sas_port *hpsa_sas_port;
9746 struct sas_rphy *rphy;
9747
9748 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9749 if (!hpsa_sas_port)
9750 return -ENOMEM;
9751
9752 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9753 if (!rphy) {
9754 rc = -ENODEV;
9755 goto free_sas_port;
9756 }
9757
9758 hpsa_sas_port->rphy = rphy;
9759 device->sas_port = hpsa_sas_port;
9760
9761 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9762 if (rc)
9763 goto free_sas_port;
9764
9765 return 0;
9766
9767free_sas_port:
9768 hpsa_free_sas_port(hpsa_sas_port);
9769 device->sas_port = NULL;
9770
9771 return rc;
9772}
9773
9774static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9775{
9776 if (device->sas_port) {
9777 hpsa_free_sas_port(device->sas_port);
9778 device->sas_port = NULL;
9779 }
9780}
9781
9782static int
9783hpsa_sas_get_linkerrors(struct sas_phy *phy)
9784{
9785 return 0;
9786}
9787
9788static int
9789hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9790{
9791 struct Scsi_Host *shost = phy_to_shost(rphy);
9792 struct ctlr_info *h;
9793 struct hpsa_scsi_dev_t *sd;
9794
9795 if (!shost)
9796 return -ENXIO;
9797
9798 h = shost_to_hba(shost);
9799
9800 if (!h)
9801 return -ENXIO;
9802
9803 sd = hpsa_find_device_by_sas_rphy(h, rphy);
9804 if (!sd)
9805 return -ENXIO;
9806
9807 *identifier = sd->eli;
9808
9809 return 0;
9810}
9811
9812static int
9813hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9814{
9815 return -ENXIO;
9816}
9817
9818static int
9819hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9820{
9821 return 0;
9822}
9823
9824static int
9825hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9826{
9827 return 0;
9828}
9829
9830static int
9831hpsa_sas_phy_setup(struct sas_phy *phy)
9832{
9833 return 0;
9834}
9835
9836static void
9837hpsa_sas_phy_release(struct sas_phy *phy)
9838{
9839}
9840
9841static int
9842hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9843{
9844 return -EINVAL;
9845}
9846
9847static struct sas_function_template hpsa_sas_transport_functions = {
9848 .get_linkerrors = hpsa_sas_get_linkerrors,
9849 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9850 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9851 .phy_reset = hpsa_sas_phy_reset,
9852 .phy_enable = hpsa_sas_phy_enable,
9853 .phy_setup = hpsa_sas_phy_setup,
9854 .phy_release = hpsa_sas_phy_release,
9855 .set_phy_speed = hpsa_sas_phy_speed,
9856};
9857
9858
9859
9860
9861
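/*
 * Module init/exit: attach the SAS transport template and register the PCI
 * driver; unwind in the opposite order on exit or on registration failure.
 */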
9862static int __init hpsa_init(void)
9863{
9864 int rc;
9865
9866 hpsa_sas_transport_template =
9867 sas_attach_transport(&hpsa_sas_transport_functions);
9868 if (!hpsa_sas_transport_template)
9869 return -ENODEV;
9870
9871 rc = pci_register_driver(&hpsa_pci_driver);
9872
9873 if (rc)
9874 sas_release_transport(hpsa_sas_transport_template);
9875
9876 return rc;
9877}
9878
9879static void __exit hpsa_cleanup(void)
9880{
9881 pci_unregister_driver(&hpsa_pci_driver);
9882 sas_release_transport(hpsa_sas_transport_template);
9883}
9884
9885static void __attribute__((unused)) verify_offsets(void)
9886{
9887#define VERIFY_OFFSET(member, offset) \
9888 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9889
9890 VERIFY_OFFSET(structure_size, 0);
9891 VERIFY_OFFSET(volume_blk_size, 4);
9892 VERIFY_OFFSET(volume_blk_cnt, 8);
9893 VERIFY_OFFSET(phys_blk_shift, 16);
9894 VERIFY_OFFSET(parity_rotation_shift, 17);
9895 VERIFY_OFFSET(strip_size, 18);
9896 VERIFY_OFFSET(disk_starting_blk, 20);
9897 VERIFY_OFFSET(disk_blk_cnt, 28);
9898 VERIFY_OFFSET(data_disks_per_row, 36);
9899 VERIFY_OFFSET(metadata_disks_per_row, 38);
9900 VERIFY_OFFSET(row_cnt, 40);
9901 VERIFY_OFFSET(layout_map_count, 42);
9902 VERIFY_OFFSET(flags, 44);
9903 VERIFY_OFFSET(dekindex, 46);
9904
9905 VERIFY_OFFSET(data, 64);
9906
9907#undef VERIFY_OFFSET
9908
9909#define VERIFY_OFFSET(member, offset) \
9910 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9911
9912 VERIFY_OFFSET(IU_type, 0);
9913 VERIFY_OFFSET(direction, 1);
9914 VERIFY_OFFSET(reply_queue, 2);
9915
9916 VERIFY_OFFSET(scsi_nexus, 4);
9917 VERIFY_OFFSET(Tag, 8);
9918 VERIFY_OFFSET(cdb, 16);
9919 VERIFY_OFFSET(cciss_lun, 32);
9920 VERIFY_OFFSET(data_len, 40);
9921 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9922 VERIFY_OFFSET(sg_count, 45);
9923
9924 VERIFY_OFFSET(err_ptr, 48);
9925 VERIFY_OFFSET(err_len, 56);
9926
9927 VERIFY_OFFSET(sg, 64);
9928
9929#undef VERIFY_OFFSET
9930
9931#define VERIFY_OFFSET(member, offset) \
9932 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9933
9934 VERIFY_OFFSET(dev_handle, 0x00);
9935 VERIFY_OFFSET(reserved1, 0x02);
9936 VERIFY_OFFSET(function, 0x03);
9937 VERIFY_OFFSET(reserved2, 0x04);
9938 VERIFY_OFFSET(err_info, 0x0C);
9939 VERIFY_OFFSET(reserved3, 0x10);
9940 VERIFY_OFFSET(err_info_len, 0x12);
9941 VERIFY_OFFSET(reserved4, 0x13);
9942 VERIFY_OFFSET(sgl_offset, 0x14);
9943 VERIFY_OFFSET(reserved5, 0x15);
9944 VERIFY_OFFSET(transfer_len, 0x1C);
9945 VERIFY_OFFSET(reserved6, 0x20);
9946 VERIFY_OFFSET(io_flags, 0x24);
9947 VERIFY_OFFSET(reserved7, 0x26);
9948 VERIFY_OFFSET(LUN, 0x34);
9949 VERIFY_OFFSET(control, 0x3C);
9950 VERIFY_OFFSET(CDB, 0x40);
9951 VERIFY_OFFSET(reserved8, 0x50);
9952 VERIFY_OFFSET(host_context_flags, 0x60);
9953 VERIFY_OFFSET(timeout_sec, 0x62);
9954 VERIFY_OFFSET(ReplyQueue, 0x64);
9955 VERIFY_OFFSET(reserved9, 0x65);
9956 VERIFY_OFFSET(tag, 0x68);
9957 VERIFY_OFFSET(host_addr, 0x70);
9958 VERIFY_OFFSET(CISS_LUN, 0x78);
9959 VERIFY_OFFSET(SG, 0x78 + 8);
9960#undef VERIFY_OFFSET
9961}
9962
9963module_init(hpsa_init);
9964module_exit(hpsa_cleanup);
9965