1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/module.h>
22#include <linux/interrupt.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/kernel.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/fs.h>
29#include <linux/timer.h>
30#include <linux/init.h>
31#include <linux/spinlock.h>
32#include <linux/compat.h>
33#include <linux/blktrace_api.h>
34#include <linux/uaccess.h>
35#include <linux/io.h>
36#include <linux/dma-mapping.h>
37#include <linux/completion.h>
38#include <linux/moduleparam.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_host.h>
43#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_eh.h>
45#include <scsi/scsi_transport_sas.h>
46#include <scsi/scsi_dbg.h>
47#include <linux/cciss_ioctl.h>
48#include <linux/string.h>
49#include <linux/bitmap.h>
50#include <linux/atomic.h>
51#include <linux/jiffies.h>
52#include <linux/percpu-defs.h>
53#include <linux/percpu.h>
54#include <asm/unaligned.h>
55#include <asm/div64.h>
56#include "hpsa_cmd.h"
57#include "hpsa.h"
58
59
60
61
62
63#define HPSA_DRIVER_VERSION "3.4.20-200"
64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65#define HPSA "hpsa"
66
67
68#define CLEAR_EVENT_WAIT_INTERVAL 20
69#define MODE_CHANGE_WAIT_INTERVAL 10
70#define MAX_CLEAR_EVENT_WAIT 30000
71#define MAX_MODE_CHANGE_WAIT 2000
72#define MAX_IOCTL_CONFIG_WAIT 1000
73
74
75#define MAX_CMD_RETRIES 3
76
77#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
78
79
80MODULE_AUTHOR("Hewlett-Packard Company");
81MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
82 HPSA_DRIVER_VERSION);
83MODULE_VERSION(HPSA_DRIVER_VERSION);
84MODULE_LICENSE("GPL");
85MODULE_ALIAS("cciss");
86
87static int hpsa_simple_mode;
88module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
89MODULE_PARM_DESC(hpsa_simple_mode,
90 "Use 'simple mode' rather than 'performant mode'");
91
92
93static const struct pci_device_id hpsa_pci_device_id[] = {
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
135 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
141 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
142 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
145 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
146 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
147 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
148 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
149 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
150 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
151 {0,}
152};
153
154MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
155
156
157
158
159
160static struct board_type products[] = {
161 {0x40700E11, "Smart Array 5300", &SA5A_access},
162 {0x40800E11, "Smart Array 5i", &SA5B_access},
163 {0x40820E11, "Smart Array 532", &SA5B_access},
164 {0x40830E11, "Smart Array 5312", &SA5B_access},
165 {0x409A0E11, "Smart Array 641", &SA5A_access},
166 {0x409B0E11, "Smart Array 642", &SA5A_access},
167 {0x409C0E11, "Smart Array 6400", &SA5A_access},
168 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
169 {0x40910E11, "Smart Array 6i", &SA5A_access},
170 {0x3225103C, "Smart Array P600", &SA5A_access},
171 {0x3223103C, "Smart Array P800", &SA5A_access},
172 {0x3234103C, "Smart Array P400", &SA5A_access},
173 {0x3235103C, "Smart Array P400i", &SA5A_access},
174 {0x3211103C, "Smart Array E200i", &SA5A_access},
175 {0x3212103C, "Smart Array E200", &SA5A_access},
176 {0x3213103C, "Smart Array E200i", &SA5A_access},
177 {0x3214103C, "Smart Array E200i", &SA5A_access},
178 {0x3215103C, "Smart Array E200i", &SA5A_access},
179 {0x3237103C, "Smart Array E500", &SA5A_access},
180 {0x323D103C, "Smart Array P700m", &SA5A_access},
181 {0x3241103C, "Smart Array P212", &SA5_access},
182 {0x3243103C, "Smart Array P410", &SA5_access},
183 {0x3245103C, "Smart Array P410i", &SA5_access},
184 {0x3247103C, "Smart Array P411", &SA5_access},
185 {0x3249103C, "Smart Array P812", &SA5_access},
186 {0x324A103C, "Smart Array P712m", &SA5_access},
187 {0x324B103C, "Smart Array P711m", &SA5_access},
188 {0x3233103C, "HP StorageWorks 1210m", &SA5_access},
189 {0x3350103C, "Smart Array P222", &SA5_access},
190 {0x3351103C, "Smart Array P420", &SA5_access},
191 {0x3352103C, "Smart Array P421", &SA5_access},
192 {0x3353103C, "Smart Array P822", &SA5_access},
193 {0x3354103C, "Smart Array P420i", &SA5_access},
194 {0x3355103C, "Smart Array P220i", &SA5_access},
195 {0x3356103C, "Smart Array P721m", &SA5_access},
196 {0x1920103C, "Smart Array P430i", &SA5_access},
197 {0x1921103C, "Smart Array P830i", &SA5_access},
198 {0x1922103C, "Smart Array P430", &SA5_access},
199 {0x1923103C, "Smart Array P431", &SA5_access},
200 {0x1924103C, "Smart Array P830", &SA5_access},
201 {0x1925103C, "Smart Array P831", &SA5_access},
202 {0x1926103C, "Smart Array P731m", &SA5_access},
203 {0x1928103C, "Smart Array P230i", &SA5_access},
204 {0x1929103C, "Smart Array P530", &SA5_access},
205 {0x21BD103C, "Smart Array P244br", &SA5_access},
206 {0x21BE103C, "Smart Array P741m", &SA5_access},
207 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
208 {0x21C0103C, "Smart Array P440ar", &SA5_access},
209 {0x21C1103C, "Smart Array P840ar", &SA5_access},
210 {0x21C2103C, "Smart Array P440", &SA5_access},
211 {0x21C3103C, "Smart Array P441", &SA5_access},
212 {0x21C4103C, "Smart Array", &SA5_access},
213 {0x21C5103C, "Smart Array P841", &SA5_access},
214 {0x21C6103C, "Smart HBA H244br", &SA5_access},
215 {0x21C7103C, "Smart HBA H240", &SA5_access},
216 {0x21C8103C, "Smart HBA H241", &SA5_access},
217 {0x21C9103C, "Smart Array", &SA5_access},
218 {0x21CA103C, "Smart Array P246br", &SA5_access},
219 {0x21CB103C, "Smart Array P840", &SA5_access},
220 {0x21CC103C, "Smart Array", &SA5_access},
221 {0x21CD103C, "Smart Array", &SA5_access},
222 {0x21CE103C, "Smart HBA", &SA5_access},
223 {0x05809005, "SmartHBA-SA", &SA5_access},
224 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
225 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
226 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
227 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
228 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
229 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
230 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
231 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
232 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
233 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
234 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
235};
236
237static struct scsi_transport_template *hpsa_sas_transport_template;
238static int hpsa_add_sas_host(struct ctlr_info *h);
239static void hpsa_delete_sas_host(struct ctlr_info *h);
240static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
241 struct hpsa_scsi_dev_t *device);
242static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
243static struct hpsa_scsi_dev_t
244 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
245 struct sas_rphy *rphy);
246
247#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
248static const struct scsi_cmnd hpsa_cmd_busy;
249#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
250static const struct scsi_cmnd hpsa_cmd_idle;
251static int number_of_controllers;
252
253static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
254static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
255static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
256 void __user *arg);
257static int hpsa_passthru_ioctl(struct ctlr_info *h,
258 IOCTL_Command_struct *iocommand);
259static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
260 BIG_IOCTL_Command_struct *ioc);
261
262#ifdef CONFIG_COMPAT
263static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
264 void __user *arg);
265#endif
266
267static void cmd_free(struct ctlr_info *h, struct CommandList *c);
268static struct CommandList *cmd_alloc(struct ctlr_info *h);
269static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
270static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
271 struct scsi_cmnd *scmd);
272static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
273 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
274 int cmd_type);
275static void hpsa_free_cmd_pool(struct ctlr_info *h);
276#define VPD_PAGE (1 << 8)
277#define HPSA_SIMPLE_ERROR_BITS 0x03
278
279static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
280static void hpsa_scan_start(struct Scsi_Host *);
281static int hpsa_scan_finished(struct Scsi_Host *sh,
282 unsigned long elapsed_time);
283static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
284
285static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
286static int hpsa_slave_alloc(struct scsi_device *sdev);
287static int hpsa_slave_configure(struct scsi_device *sdev);
288static void hpsa_slave_destroy(struct scsi_device *sdev);
289
290static void hpsa_update_scsi_devices(struct ctlr_info *h);
291static int check_for_unit_attention(struct ctlr_info *h,
292 struct CommandList *c);
293static void check_ioctl_unit_attention(struct ctlr_info *h,
294 struct CommandList *c);
295
296static void calc_bucket_map(int *bucket, int num_buckets,
297 int nsgs, int min_blocks, u32 *bucket_map);
298static void hpsa_free_performant_mode(struct ctlr_info *h);
299static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
300static inline u32 next_command(struct ctlr_info *h, u8 q);
301static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
302 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
303 u64 *cfg_offset);
304static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
305 unsigned long *memory_bar);
306static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
307 bool *legacy_board);
308static int wait_for_device_to_become_ready(struct ctlr_info *h,
309 unsigned char lunaddr[],
310 int reply_queue);
311static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
312 int wait_for_ready);
313static inline void finish_cmd(struct CommandList *c);
314static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
315#define BOARD_NOT_READY 0
316#define BOARD_READY 1
317static void hpsa_drain_accel_commands(struct ctlr_info *h);
318static void hpsa_flush_cache(struct ctlr_info *h);
319static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
320 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
321 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
322static void hpsa_command_resubmit_worker(struct work_struct *work);
323static u32 lockup_detected(struct ctlr_info *h);
324static int detect_controller_lockup(struct ctlr_info *h);
325static void hpsa_disable_rld_caching(struct ctlr_info *h);
326static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
327 struct ReportExtendedLUNdata *buf, int bufsize);
328static bool hpsa_vpd_page_supported(struct ctlr_info *h,
329 unsigned char scsi3addr[], u8 page);
330static int hpsa_luns_changed(struct ctlr_info *h);
331static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
332 struct hpsa_scsi_dev_t *dev,
333 unsigned char *scsi3addr);
334
335static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
336{
337 unsigned long *priv = shost_priv(sdev->host);
338 return (struct ctlr_info *) *priv;
339}
340
341static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
342{
343 unsigned long *priv = shost_priv(sh);
344 return (struct ctlr_info *) *priv;
345}
346
347static inline bool hpsa_is_cmd_idle(struct CommandList *c)
348{
349 return c->scsi_cmd == SCSI_CMD_IDLE;
350}
351
352
353static void decode_sense_data(const u8 *sense_data, int sense_data_len,
354 u8 *sense_key, u8 *asc, u8 *ascq)
355{
356 struct scsi_sense_hdr sshdr;
357 bool rc;
358
359 *sense_key = -1;
360 *asc = -1;
361 *ascq = -1;
362
363 if (sense_data_len < 1)
364 return;
365
366 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
367 if (rc) {
368 *sense_key = sshdr.sense_key;
369 *asc = sshdr.asc;
370 *ascq = sshdr.ascq;
371 }
372}
373
374static int check_for_unit_attention(struct ctlr_info *h,
375 struct CommandList *c)
376{
377 u8 sense_key, asc, ascq;
378 int sense_len;
379
380 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
381 sense_len = sizeof(c->err_info->SenseInfo);
382 else
383 sense_len = c->err_info->SenseLen;
384
385 decode_sense_data(c->err_info->SenseInfo, sense_len,
386 &sense_key, &asc, &ascq);
387 if (sense_key != UNIT_ATTENTION || asc == 0xff)
388 return 0;
389
390 switch (asc) {
391 case STATE_CHANGED:
392 dev_warn(&h->pdev->dev,
393 "%s: a state change detected, command retried\n",
394 h->devname);
395 break;
396 case LUN_FAILED:
397 dev_warn(&h->pdev->dev,
398 "%s: LUN failure detected\n", h->devname);
399 break;
400 case REPORT_LUNS_CHANGED:
401 dev_warn(&h->pdev->dev,
402 "%s: report LUN data changed\n", h->devname);
403
404
405
406
407 break;
408 case POWER_OR_RESET:
409 dev_warn(&h->pdev->dev,
410 "%s: a power on or device reset detected\n",
411 h->devname);
412 break;
413 case UNIT_ATTENTION_CLEARED:
414 dev_warn(&h->pdev->dev,
415 "%s: unit attention cleared by another initiator\n",
416 h->devname);
417 break;
418 default:
419 dev_warn(&h->pdev->dev,
420 "%s: unknown unit attention detected\n",
421 h->devname);
422 break;
423 }
424 return 1;
425}
426
427static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
428{
429 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
430 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
431 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
432 return 0;
433 dev_warn(&h->pdev->dev, HPSA "device busy");
434 return 1;
435}
436
437static u32 lockup_detected(struct ctlr_info *h);
438static ssize_t host_show_lockup_detected(struct device *dev,
439 struct device_attribute *attr, char *buf)
440{
441 int ld;
442 struct ctlr_info *h;
443 struct Scsi_Host *shost = class_to_shost(dev);
444
445 h = shost_to_hba(shost);
446 ld = lockup_detected(h);
447
448 return sprintf(buf, "ld=%d\n", ld);
449}
450
451static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
452 struct device_attribute *attr,
453 const char *buf, size_t count)
454{
455 int status, len;
456 struct ctlr_info *h;
457 struct Scsi_Host *shost = class_to_shost(dev);
458 char tmpbuf[10];
459
460 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
461 return -EACCES;
462 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
463 strncpy(tmpbuf, buf, len);
464 tmpbuf[len] = '\0';
465 if (sscanf(tmpbuf, "%d", &status) != 1)
466 return -EINVAL;
467 h = shost_to_hba(shost);
468 h->acciopath_status = !!status;
469 dev_warn(&h->pdev->dev,
470 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
471 h->acciopath_status ? "enabled" : "disabled");
472 return count;
473}
474
475static ssize_t host_store_raid_offload_debug(struct device *dev,
476 struct device_attribute *attr,
477 const char *buf, size_t count)
478{
479 int debug_level, len;
480 struct ctlr_info *h;
481 struct Scsi_Host *shost = class_to_shost(dev);
482 char tmpbuf[10];
483
484 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
485 return -EACCES;
486 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
487 strncpy(tmpbuf, buf, len);
488 tmpbuf[len] = '\0';
489 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
490 return -EINVAL;
491 if (debug_level < 0)
492 debug_level = 0;
493 h = shost_to_hba(shost);
494 h->raid_offload_debug = debug_level;
495 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
496 h->raid_offload_debug);
497 return count;
498}
499
500static ssize_t host_store_rescan(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t count)
503{
504 struct ctlr_info *h;
505 struct Scsi_Host *shost = class_to_shost(dev);
506 h = shost_to_hba(shost);
507 hpsa_scan_start(h->scsi_host);
508 return count;
509}
510
511static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
512{
513 device->offload_enabled = 0;
514 device->offload_to_be_enabled = 0;
515}
516
517static ssize_t host_show_firmware_revision(struct device *dev,
518 struct device_attribute *attr, char *buf)
519{
520 struct ctlr_info *h;
521 struct Scsi_Host *shost = class_to_shost(dev);
522 unsigned char *fwrev;
523
524 h = shost_to_hba(shost);
525 if (!h->hba_inquiry_data)
526 return 0;
527 fwrev = &h->hba_inquiry_data[32];
528 return snprintf(buf, 20, "%c%c%c%c\n",
529 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
530}
531
532static ssize_t host_show_commands_outstanding(struct device *dev,
533 struct device_attribute *attr, char *buf)
534{
535 struct Scsi_Host *shost = class_to_shost(dev);
536 struct ctlr_info *h = shost_to_hba(shost);
537
538 return snprintf(buf, 20, "%d\n",
539 atomic_read(&h->commands_outstanding));
540}
541
542static ssize_t host_show_transport_mode(struct device *dev,
543 struct device_attribute *attr, char *buf)
544{
545 struct ctlr_info *h;
546 struct Scsi_Host *shost = class_to_shost(dev);
547
548 h = shost_to_hba(shost);
549 return snprintf(buf, 20, "%s\n",
550 h->transMethod & CFGTBL_Trans_Performant ?
551 "performant" : "simple");
552}
553
554static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
555 struct device_attribute *attr, char *buf)
556{
557 struct ctlr_info *h;
558 struct Scsi_Host *shost = class_to_shost(dev);
559
560 h = shost_to_hba(shost);
561 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
562 (h->acciopath_status == 1) ? "enabled" : "disabled");
563}
564
565
566static u32 unresettable_controller[] = {
567 0x324a103C,
568 0x324b103C,
569 0x3223103C,
570 0x3234103C,
571 0x3235103C,
572 0x3211103C,
573 0x3212103C,
574 0x3213103C,
575 0x3214103C,
576 0x3215103C,
577 0x3237103C,
578 0x323D103C,
579 0x40800E11,
580 0x409C0E11,
581 0x409D0E11,
582 0x40700E11,
583 0x40820E11,
584 0x40830E11,
585 0x409A0E11,
586 0x409B0E11,
587 0x40910E11,
588};
589
590
591static u32 soft_unresettable_controller[] = {
592 0x40800E11,
593 0x40700E11,
594 0x40820E11,
595 0x40830E11,
596 0x409A0E11,
597 0x409B0E11,
598 0x40910E11,
599
600
601
602
603
604
605
606 0x409C0E11,
607 0x409D0E11,
608};
609
610static int board_id_in_array(u32 a[], int nelems, u32 board_id)
611{
612 int i;
613
614 for (i = 0; i < nelems; i++)
615 if (a[i] == board_id)
616 return 1;
617 return 0;
618}
619
620static int ctlr_is_hard_resettable(u32 board_id)
621{
622 return !board_id_in_array(unresettable_controller,
623 ARRAY_SIZE(unresettable_controller), board_id);
624}
625
626static int ctlr_is_soft_resettable(u32 board_id)
627{
628 return !board_id_in_array(soft_unresettable_controller,
629 ARRAY_SIZE(soft_unresettable_controller), board_id);
630}
631
632static int ctlr_is_resettable(u32 board_id)
633{
634 return ctlr_is_hard_resettable(board_id) ||
635 ctlr_is_soft_resettable(board_id);
636}
637
638static ssize_t host_show_resettable(struct device *dev,
639 struct device_attribute *attr, char *buf)
640{
641 struct ctlr_info *h;
642 struct Scsi_Host *shost = class_to_shost(dev);
643
644 h = shost_to_hba(shost);
645 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
646}
647
648static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
649{
650 return (scsi3addr[3] & 0xC0) == 0x40;
651}
652
653static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
654 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
655};
656#define HPSA_RAID_0 0
657#define HPSA_RAID_4 1
658#define HPSA_RAID_1 2
659#define HPSA_RAID_5 3
660#define HPSA_RAID_51 4
661#define HPSA_RAID_6 5
662#define HPSA_RAID_ADM 6
663#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
664#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
665
666static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
667{
668 return !device->physical_device;
669}
670
671static ssize_t raid_level_show(struct device *dev,
672 struct device_attribute *attr, char *buf)
673{
674 ssize_t l = 0;
675 unsigned char rlevel;
676 struct ctlr_info *h;
677 struct scsi_device *sdev;
678 struct hpsa_scsi_dev_t *hdev;
679 unsigned long flags;
680
681 sdev = to_scsi_device(dev);
682 h = sdev_to_hba(sdev);
683 spin_lock_irqsave(&h->lock, flags);
684 hdev = sdev->hostdata;
685 if (!hdev) {
686 spin_unlock_irqrestore(&h->lock, flags);
687 return -ENODEV;
688 }
689
690
691 if (!is_logical_device(hdev)) {
692 spin_unlock_irqrestore(&h->lock, flags);
693 l = snprintf(buf, PAGE_SIZE, "N/A\n");
694 return l;
695 }
696
697 rlevel = hdev->raid_level;
698 spin_unlock_irqrestore(&h->lock, flags);
699 if (rlevel > RAID_UNKNOWN)
700 rlevel = RAID_UNKNOWN;
701 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
702 return l;
703}
704
705static ssize_t lunid_show(struct device *dev,
706 struct device_attribute *attr, char *buf)
707{
708 struct ctlr_info *h;
709 struct scsi_device *sdev;
710 struct hpsa_scsi_dev_t *hdev;
711 unsigned long flags;
712 unsigned char lunid[8];
713
714 sdev = to_scsi_device(dev);
715 h = sdev_to_hba(sdev);
716 spin_lock_irqsave(&h->lock, flags);
717 hdev = sdev->hostdata;
718 if (!hdev) {
719 spin_unlock_irqrestore(&h->lock, flags);
720 return -ENODEV;
721 }
722 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
723 spin_unlock_irqrestore(&h->lock, flags);
724 return snprintf(buf, 20, "0x%8phN\n", lunid);
725}
726
727static ssize_t unique_id_show(struct device *dev,
728 struct device_attribute *attr, char *buf)
729{
730 struct ctlr_info *h;
731 struct scsi_device *sdev;
732 struct hpsa_scsi_dev_t *hdev;
733 unsigned long flags;
734 unsigned char sn[16];
735
736 sdev = to_scsi_device(dev);
737 h = sdev_to_hba(sdev);
738 spin_lock_irqsave(&h->lock, flags);
739 hdev = sdev->hostdata;
740 if (!hdev) {
741 spin_unlock_irqrestore(&h->lock, flags);
742 return -ENODEV;
743 }
744 memcpy(sn, hdev->device_id, sizeof(sn));
745 spin_unlock_irqrestore(&h->lock, flags);
746 return snprintf(buf, 16 * 2 + 2,
747 "%02X%02X%02X%02X%02X%02X%02X%02X"
748 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
749 sn[0], sn[1], sn[2], sn[3],
750 sn[4], sn[5], sn[6], sn[7],
751 sn[8], sn[9], sn[10], sn[11],
752 sn[12], sn[13], sn[14], sn[15]);
753}
754
755static ssize_t sas_address_show(struct device *dev,
756 struct device_attribute *attr, char *buf)
757{
758 struct ctlr_info *h;
759 struct scsi_device *sdev;
760 struct hpsa_scsi_dev_t *hdev;
761 unsigned long flags;
762 u64 sas_address;
763
764 sdev = to_scsi_device(dev);
765 h = sdev_to_hba(sdev);
766 spin_lock_irqsave(&h->lock, flags);
767 hdev = sdev->hostdata;
768 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
769 spin_unlock_irqrestore(&h->lock, flags);
770 return -ENODEV;
771 }
772 sas_address = hdev->sas_address;
773 spin_unlock_irqrestore(&h->lock, flags);
774
775 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
776}
777
778static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
779 struct device_attribute *attr, char *buf)
780{
781 struct ctlr_info *h;
782 struct scsi_device *sdev;
783 struct hpsa_scsi_dev_t *hdev;
784 unsigned long flags;
785 int offload_enabled;
786
787 sdev = to_scsi_device(dev);
788 h = sdev_to_hba(sdev);
789 spin_lock_irqsave(&h->lock, flags);
790 hdev = sdev->hostdata;
791 if (!hdev) {
792 spin_unlock_irqrestore(&h->lock, flags);
793 return -ENODEV;
794 }
795 offload_enabled = hdev->offload_enabled;
796 spin_unlock_irqrestore(&h->lock, flags);
797
798 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
799 return snprintf(buf, 20, "%d\n", offload_enabled);
800 else
801 return snprintf(buf, 40, "%s\n",
802 "Not applicable for a controller");
803}
804
805#define MAX_PATHS 8
806static ssize_t path_info_show(struct device *dev,
807 struct device_attribute *attr, char *buf)
808{
809 struct ctlr_info *h;
810 struct scsi_device *sdev;
811 struct hpsa_scsi_dev_t *hdev;
812 unsigned long flags;
813 int i;
814 int output_len = 0;
815 u8 box;
816 u8 bay;
817 u8 path_map_index = 0;
818 char *active;
819 unsigned char phys_connector[2];
820
821 sdev = to_scsi_device(dev);
822 h = sdev_to_hba(sdev);
823 spin_lock_irqsave(&h->devlock, flags);
824 hdev = sdev->hostdata;
825 if (!hdev) {
826 spin_unlock_irqrestore(&h->devlock, flags);
827 return -ENODEV;
828 }
829
830 bay = hdev->bay;
831 for (i = 0; i < MAX_PATHS; i++) {
832 path_map_index = 1<<i;
833 if (i == hdev->active_path_index)
834 active = "Active";
835 else if (hdev->path_map & path_map_index)
836 active = "Inactive";
837 else
838 continue;
839
840 output_len += scnprintf(buf + output_len,
841 PAGE_SIZE - output_len,
842 "[%d:%d:%d:%d] %20.20s ",
843 h->scsi_host->host_no,
844 hdev->bus, hdev->target, hdev->lun,
845 scsi_device_type(hdev->devtype));
846
847 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
848 output_len += scnprintf(buf + output_len,
849 PAGE_SIZE - output_len,
850 "%s\n", active);
851 continue;
852 }
853
854 box = hdev->box[i];
855 memcpy(&phys_connector, &hdev->phys_connector[i],
856 sizeof(phys_connector));
857 if (phys_connector[0] < '0')
858 phys_connector[0] = '0';
859 if (phys_connector[1] < '0')
860 phys_connector[1] = '0';
861 output_len += scnprintf(buf + output_len,
862 PAGE_SIZE - output_len,
863 "PORT: %.2s ",
864 phys_connector);
865 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
866 hdev->expose_device) {
867 if (box == 0 || box == 0xFF) {
868 output_len += scnprintf(buf + output_len,
869 PAGE_SIZE - output_len,
870 "BAY: %hhu %s\n",
871 bay, active);
872 } else {
873 output_len += scnprintf(buf + output_len,
874 PAGE_SIZE - output_len,
875 "BOX: %hhu BAY: %hhu %s\n",
876 box, bay, active);
877 }
878 } else if (box != 0 && box != 0xFF) {
879 output_len += scnprintf(buf + output_len,
880 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
881 box, active);
882 } else
883 output_len += scnprintf(buf + output_len,
884 PAGE_SIZE - output_len, "%s\n", active);
885 }
886
887 spin_unlock_irqrestore(&h->devlock, flags);
888 return output_len;
889}
890
891static ssize_t host_show_ctlr_num(struct device *dev,
892 struct device_attribute *attr, char *buf)
893{
894 struct ctlr_info *h;
895 struct Scsi_Host *shost = class_to_shost(dev);
896
897 h = shost_to_hba(shost);
898 return snprintf(buf, 20, "%d\n", h->ctlr);
899}
900
901static ssize_t host_show_legacy_board(struct device *dev,
902 struct device_attribute *attr, char *buf)
903{
904 struct ctlr_info *h;
905 struct Scsi_Host *shost = class_to_shost(dev);
906
907 h = shost_to_hba(shost);
908 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
909}
910
911static DEVICE_ATTR_RO(raid_level);
912static DEVICE_ATTR_RO(lunid);
913static DEVICE_ATTR_RO(unique_id);
914static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
915static DEVICE_ATTR_RO(sas_address);
916static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
917 host_show_hp_ssd_smart_path_enabled, NULL);
918static DEVICE_ATTR_RO(path_info);
919static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
920 host_show_hp_ssd_smart_path_status,
921 host_store_hp_ssd_smart_path_status);
922static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
923 host_store_raid_offload_debug);
924static DEVICE_ATTR(firmware_revision, S_IRUGO,
925 host_show_firmware_revision, NULL);
926static DEVICE_ATTR(commands_outstanding, S_IRUGO,
927 host_show_commands_outstanding, NULL);
928static DEVICE_ATTR(transport_mode, S_IRUGO,
929 host_show_transport_mode, NULL);
930static DEVICE_ATTR(resettable, S_IRUGO,
931 host_show_resettable, NULL);
932static DEVICE_ATTR(lockup_detected, S_IRUGO,
933 host_show_lockup_detected, NULL);
934static DEVICE_ATTR(ctlr_num, S_IRUGO,
935 host_show_ctlr_num, NULL);
936static DEVICE_ATTR(legacy_board, S_IRUGO,
937 host_show_legacy_board, NULL);
938
939static struct device_attribute *hpsa_sdev_attrs[] = {
940 &dev_attr_raid_level,
941 &dev_attr_lunid,
942 &dev_attr_unique_id,
943 &dev_attr_hp_ssd_smart_path_enabled,
944 &dev_attr_path_info,
945 &dev_attr_sas_address,
946 NULL,
947};
948
949static struct device_attribute *hpsa_shost_attrs[] = {
950 &dev_attr_rescan,
951 &dev_attr_firmware_revision,
952 &dev_attr_commands_outstanding,
953 &dev_attr_transport_mode,
954 &dev_attr_resettable,
955 &dev_attr_hp_ssd_smart_path_status,
956 &dev_attr_raid_offload_debug,
957 &dev_attr_lockup_detected,
958 &dev_attr_ctlr_num,
959 &dev_attr_legacy_board,
960 NULL,
961};
962
963#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
964 HPSA_MAX_CONCURRENT_PASSTHRUS)
965
966static struct scsi_host_template hpsa_driver_template = {
967 .module = THIS_MODULE,
968 .name = HPSA,
969 .proc_name = HPSA,
970 .queuecommand = hpsa_scsi_queue_command,
971 .scan_start = hpsa_scan_start,
972 .scan_finished = hpsa_scan_finished,
973 .change_queue_depth = hpsa_change_queue_depth,
974 .this_id = -1,
975 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
976 .ioctl = hpsa_ioctl,
977 .slave_alloc = hpsa_slave_alloc,
978 .slave_configure = hpsa_slave_configure,
979 .slave_destroy = hpsa_slave_destroy,
980#ifdef CONFIG_COMPAT
981 .compat_ioctl = hpsa_compat_ioctl,
982#endif
983 .sdev_attrs = hpsa_sdev_attrs,
984 .shost_attrs = hpsa_shost_attrs,
985 .max_sectors = 2048,
986 .no_write_same = 1,
987};
988
989static inline u32 next_command(struct ctlr_info *h, u8 q)
990{
991 u32 a;
992 struct reply_queue_buffer *rq = &h->reply_queue[q];
993
994 if (h->transMethod & CFGTBL_Trans_io_accel1)
995 return h->access.command_completed(h, q);
996
997 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
998 return h->access.command_completed(h, q);
999
1000 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
1001 a = rq->head[rq->current_entry];
1002 rq->current_entry++;
1003 atomic_dec(&h->commands_outstanding);
1004 } else {
1005 a = FIFO_EMPTY;
1006 }
1007
1008 if (rq->current_entry == h->max_commands) {
1009 rq->current_entry = 0;
1010 rq->wraparound ^= 1;
1011 }
1012 return a;
1013}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046#define DEFAULT_REPLY_QUEUE (-1)
1047static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1048 int reply_queue)
1049{
1050 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1051 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1052 if (unlikely(!h->msix_vectors))
1053 return;
1054 c->Header.ReplyQueue = reply_queue;
1055 }
1056}
1057
1058static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1059 struct CommandList *c,
1060 int reply_queue)
1061{
1062 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1063
1064
1065
1066
1067
1068 cp->ReplyQueue = reply_queue;
1069
1070
1071
1072
1073
1074
1075 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1076 IOACCEL1_BUSADDR_CMDTYPE;
1077}
1078
1079static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1080 struct CommandList *c,
1081 int reply_queue)
1082{
1083 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1084 &h->ioaccel2_cmd_pool[c->cmdindex];
1085
1086
1087
1088
1089 cp->reply_queue = reply_queue;
1090
1091
1092
1093
1094
1095 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1096}
1097
1098static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1099 struct CommandList *c,
1100 int reply_queue)
1101{
1102 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1103
1104
1105
1106
1107
1108 cp->reply_queue = reply_queue;
1109
1110
1111
1112
1113
1114
1115 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1116}
1117
1118static int is_firmware_flash_cmd(u8 *cdb)
1119{
1120 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1121}
1122
1123
1124
1125
1126
1127
1128#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1129#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1130#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1131static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1132 struct CommandList *c)
1133{
1134 if (!is_firmware_flash_cmd(c->Request.CDB))
1135 return;
1136 atomic_inc(&h->firmware_flash_in_progress);
1137 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1138}
1139
1140static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1141 struct CommandList *c)
1142{
1143 if (is_firmware_flash_cmd(c->Request.CDB) &&
1144 atomic_dec_and_test(&h->firmware_flash_in_progress))
1145 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1146}
1147
1148static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1149 struct CommandList *c, int reply_queue)
1150{
1151 dial_down_lockup_detection_during_fw_flash(h, c);
1152 atomic_inc(&h->commands_outstanding);
1153
1154
1155
1156 if (c->device && !c->retry_pending)
1157 atomic_inc(&c->device->commands_outstanding);
1158
1159 reply_queue = h->reply_map[raw_smp_processor_id()];
1160 switch (c->cmd_type) {
1161 case CMD_IOACCEL1:
1162 set_ioaccel1_performant_mode(h, c, reply_queue);
1163 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1164 break;
1165 case CMD_IOACCEL2:
1166 set_ioaccel2_performant_mode(h, c, reply_queue);
1167 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1168 break;
1169 case IOACCEL2_TMF:
1170 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1171 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1172 break;
1173 default:
1174 set_performant_mode(h, c, reply_queue);
1175 h->access.submit_command(h, c);
1176 }
1177}
1178
1179static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1180{
1181 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1182}
1183
1184static inline int is_hba_lunid(unsigned char scsi3addr[])
1185{
1186 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1187}
1188
1189static inline int is_scsi_rev_5(struct ctlr_info *h)
1190{
1191 if (!h->hba_inquiry_data)
1192 return 0;
1193 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1194 return 1;
1195 return 0;
1196}
1197
1198static int hpsa_find_target_lun(struct ctlr_info *h,
1199 unsigned char scsi3addr[], int bus, int *target, int *lun)
1200{
1201
1202
1203
1204 int i, found = 0;
1205 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1206
1207 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1208
1209 for (i = 0; i < h->ndevices; i++) {
1210 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1211 __set_bit(h->dev[i]->target, lun_taken);
1212 }
1213
1214 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1215 if (i < HPSA_MAX_DEVICES) {
1216
1217 *target = i;
1218 *lun = 0;
1219 found = 1;
1220 }
1221 return !found;
1222}
1223
1224static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1225 struct hpsa_scsi_dev_t *dev, char *description)
1226{
1227#define LABEL_SIZE 25
1228 char label[LABEL_SIZE];
1229
1230 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1231 return;
1232
1233 switch (dev->devtype) {
1234 case TYPE_RAID:
1235 snprintf(label, LABEL_SIZE, "controller");
1236 break;
1237 case TYPE_ENCLOSURE:
1238 snprintf(label, LABEL_SIZE, "enclosure");
1239 break;
1240 case TYPE_DISK:
1241 case TYPE_ZBC:
1242 if (dev->external)
1243 snprintf(label, LABEL_SIZE, "external");
1244 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1245 snprintf(label, LABEL_SIZE, "%s",
1246 raid_label[PHYSICAL_DRIVE]);
1247 else
1248 snprintf(label, LABEL_SIZE, "RAID-%s",
1249 dev->raid_level > RAID_UNKNOWN ? "?" :
1250 raid_label[dev->raid_level]);
1251 break;
1252 case TYPE_ROM:
1253 snprintf(label, LABEL_SIZE, "rom");
1254 break;
1255 case TYPE_TAPE:
1256 snprintf(label, LABEL_SIZE, "tape");
1257 break;
1258 case TYPE_MEDIUM_CHANGER:
1259 snprintf(label, LABEL_SIZE, "changer");
1260 break;
1261 default:
1262 snprintf(label, LABEL_SIZE, "UNKNOWN");
1263 break;
1264 }
1265
1266 dev_printk(level, &h->pdev->dev,
1267 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1268 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1269 description,
1270 scsi_device_type(dev->devtype),
1271 dev->vendor,
1272 dev->model,
1273 label,
1274 dev->offload_config ? '+' : '-',
1275 dev->offload_to_be_enabled ? '+' : '-',
1276 dev->expose_device);
1277}
1278
1279
1280static int hpsa_scsi_add_entry(struct ctlr_info *h,
1281 struct hpsa_scsi_dev_t *device,
1282 struct hpsa_scsi_dev_t *added[], int *nadded)
1283{
1284
1285 int n = h->ndevices;
1286 int i;
1287 unsigned char addr1[8], addr2[8];
1288 struct hpsa_scsi_dev_t *sd;
1289
1290 if (n >= HPSA_MAX_DEVICES) {
1291 dev_err(&h->pdev->dev, "too many devices, some will be "
1292 "inaccessible.\n");
1293 return -1;
1294 }
1295
1296
1297 if (device->lun != -1)
1298
1299 goto lun_assigned;
1300
1301
1302
1303
1304
1305 if (device->scsi3addr[4] == 0) {
1306
1307 if (hpsa_find_target_lun(h, device->scsi3addr,
1308 device->bus, &device->target, &device->lun) != 0)
1309 return -1;
1310 goto lun_assigned;
1311 }
1312
1313
1314
1315
1316
1317
1318
1319 memcpy(addr1, device->scsi3addr, 8);
1320 addr1[4] = 0;
1321 addr1[5] = 0;
1322 for (i = 0; i < n; i++) {
1323 sd = h->dev[i];
1324 memcpy(addr2, sd->scsi3addr, 8);
1325 addr2[4] = 0;
1326 addr2[5] = 0;
1327
1328 if (memcmp(addr1, addr2, 8) == 0) {
1329 device->bus = sd->bus;
1330 device->target = sd->target;
1331 device->lun = device->scsi3addr[4];
1332 break;
1333 }
1334 }
1335 if (device->lun == -1) {
1336 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1337 " suspect firmware bug or unsupported hardware "
1338 "configuration.\n");
1339 return -1;
1340 }
1341
1342lun_assigned:
1343
1344 h->dev[n] = device;
1345 h->ndevices++;
1346 added[*nadded] = device;
1347 (*nadded)++;
1348 hpsa_show_dev_msg(KERN_INFO, h, device,
1349 device->expose_device ? "added" : "masked");
1350 return 0;
1351}
1352
1353
1354
1355
1356
1357
1358static void hpsa_scsi_update_entry(struct ctlr_info *h,
1359 int entry, struct hpsa_scsi_dev_t *new_entry)
1360{
1361
1362 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1363
1364
1365 h->dev[entry]->raid_level = new_entry->raid_level;
1366
1367
1368
1369
1370 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1371
1372
1373 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1374
1375
1376
1377
1378
1379
1380
1381
1382 h->dev[entry]->raid_map = new_entry->raid_map;
1383 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1384 }
1385 if (new_entry->offload_to_be_enabled) {
1386 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1387 wmb();
1388 }
1389 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1390 h->dev[entry]->offload_config = new_entry->offload_config;
1391 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1392 h->dev[entry]->queue_depth = new_entry->queue_depth;
1393
1394
1395
1396
1397
1398
1399 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1400
1401
1402
1403
1404 if (!new_entry->offload_to_be_enabled)
1405 h->dev[entry]->offload_enabled = 0;
1406
1407 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1408}
1409
1410
1411static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1412 int entry, struct hpsa_scsi_dev_t *new_entry,
1413 struct hpsa_scsi_dev_t *added[], int *nadded,
1414 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1415{
1416
1417 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1418 removed[*nremoved] = h->dev[entry];
1419 (*nremoved)++;
1420
1421
1422
1423
1424
1425 if (new_entry->target == -1) {
1426 new_entry->target = h->dev[entry]->target;
1427 new_entry->lun = h->dev[entry]->lun;
1428 }
1429
1430 h->dev[entry] = new_entry;
1431 added[*nadded] = new_entry;
1432 (*nadded)++;
1433
1434 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1435}
1436
1437
1438static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1439 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1440{
1441
1442 int i;
1443 struct hpsa_scsi_dev_t *sd;
1444
1445 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1446
1447 sd = h->dev[entry];
1448 removed[*nremoved] = h->dev[entry];
1449 (*nremoved)++;
1450
1451 for (i = entry; i < h->ndevices-1; i++)
1452 h->dev[i] = h->dev[i+1];
1453 h->ndevices--;
1454 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1455}
1456
1457#define SCSI3ADDR_EQ(a, b) ( \
1458 (a)[7] == (b)[7] && \
1459 (a)[6] == (b)[6] && \
1460 (a)[5] == (b)[5] && \
1461 (a)[4] == (b)[4] && \
1462 (a)[3] == (b)[3] && \
1463 (a)[2] == (b)[2] && \
1464 (a)[1] == (b)[1] && \
1465 (a)[0] == (b)[0])
1466
1467static void fixup_botched_add(struct ctlr_info *h,
1468 struct hpsa_scsi_dev_t *added)
1469{
1470
1471
1472
1473 unsigned long flags;
1474 int i, j;
1475
1476 spin_lock_irqsave(&h->lock, flags);
1477 for (i = 0; i < h->ndevices; i++) {
1478 if (h->dev[i] == added) {
1479 for (j = i; j < h->ndevices-1; j++)
1480 h->dev[j] = h->dev[j+1];
1481 h->ndevices--;
1482 break;
1483 }
1484 }
1485 spin_unlock_irqrestore(&h->lock, flags);
1486 kfree(added);
1487}
1488
1489static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1490 struct hpsa_scsi_dev_t *dev2)
1491{
1492
1493
1494
1495
1496 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1497 sizeof(dev1->scsi3addr)) != 0)
1498 return 0;
1499 if (memcmp(dev1->device_id, dev2->device_id,
1500 sizeof(dev1->device_id)) != 0)
1501 return 0;
1502 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1503 return 0;
1504 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1505 return 0;
1506 if (dev1->devtype != dev2->devtype)
1507 return 0;
1508 if (dev1->bus != dev2->bus)
1509 return 0;
1510 return 1;
1511}
1512
1513static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1514 struct hpsa_scsi_dev_t *dev2)
1515{
1516
1517
1518
1519
1520 if (dev1->raid_level != dev2->raid_level)
1521 return 1;
1522 if (dev1->offload_config != dev2->offload_config)
1523 return 1;
1524 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1525 return 1;
1526 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1527 if (dev1->queue_depth != dev2->queue_depth)
1528 return 1;
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1539 return 1;
1540 return 0;
1541}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1552 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1553 int *index)
1554{
1555 int i;
1556#define DEVICE_NOT_FOUND 0
1557#define DEVICE_CHANGED 1
1558#define DEVICE_SAME 2
1559#define DEVICE_UPDATED 3
1560 if (needle == NULL)
1561 return DEVICE_NOT_FOUND;
1562
1563 for (i = 0; i < haystack_size; i++) {
1564 if (haystack[i] == NULL)
1565 continue;
1566 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1567 *index = i;
1568 if (device_is_the_same(needle, haystack[i])) {
1569 if (device_updated(needle, haystack[i]))
1570 return DEVICE_UPDATED;
1571 return DEVICE_SAME;
1572 } else {
1573
1574 if (needle->volume_offline)
1575 return DEVICE_NOT_FOUND;
1576 return DEVICE_CHANGED;
1577 }
1578 }
1579 }
1580 *index = -1;
1581 return DEVICE_NOT_FOUND;
1582}
1583
1584static void hpsa_monitor_offline_device(struct ctlr_info *h,
1585 unsigned char scsi3addr[])
1586{
1587 struct offline_device_entry *device;
1588 unsigned long flags;
1589
1590
1591 spin_lock_irqsave(&h->offline_device_lock, flags);
1592 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1593 if (memcmp(device->scsi3addr, scsi3addr,
1594 sizeof(device->scsi3addr)) == 0) {
1595 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1596 return;
1597 }
1598 }
1599 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1600
1601
1602 device = kmalloc(sizeof(*device), GFP_KERNEL);
1603 if (!device)
1604 return;
1605
1606 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1607 spin_lock_irqsave(&h->offline_device_lock, flags);
1608 list_add_tail(&device->offline_list, &h->offline_device_list);
1609 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1610}
1611
1612
1613static void hpsa_show_volume_status(struct ctlr_info *h,
1614 struct hpsa_scsi_dev_t *sd)
1615{
1616 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1617 dev_info(&h->pdev->dev,
1618 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1619 h->scsi_host->host_no,
1620 sd->bus, sd->target, sd->lun);
1621 switch (sd->volume_offline) {
1622 case HPSA_LV_OK:
1623 break;
1624 case HPSA_LV_UNDERGOING_ERASE:
1625 dev_info(&h->pdev->dev,
1626 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1627 h->scsi_host->host_no,
1628 sd->bus, sd->target, sd->lun);
1629 break;
1630 case HPSA_LV_NOT_AVAILABLE:
1631 dev_info(&h->pdev->dev,
1632 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1633 h->scsi_host->host_no,
1634 sd->bus, sd->target, sd->lun);
1635 break;
1636 case HPSA_LV_UNDERGOING_RPI:
1637 dev_info(&h->pdev->dev,
1638 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1639 h->scsi_host->host_no,
1640 sd->bus, sd->target, sd->lun);
1641 break;
1642 case HPSA_LV_PENDING_RPI:
1643 dev_info(&h->pdev->dev,
1644 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1645 h->scsi_host->host_no,
1646 sd->bus, sd->target, sd->lun);
1647 break;
1648 case HPSA_LV_ENCRYPTED_NO_KEY:
1649 dev_info(&h->pdev->dev,
1650 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1651 h->scsi_host->host_no,
1652 sd->bus, sd->target, sd->lun);
1653 break;
1654 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1655 dev_info(&h->pdev->dev,
1656 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1657 h->scsi_host->host_no,
1658 sd->bus, sd->target, sd->lun);
1659 break;
1660 case HPSA_LV_UNDERGOING_ENCRYPTION:
1661 dev_info(&h->pdev->dev,
1662 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1663 h->scsi_host->host_no,
1664 sd->bus, sd->target, sd->lun);
1665 break;
1666 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1667 dev_info(&h->pdev->dev,
1668 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1669 h->scsi_host->host_no,
1670 sd->bus, sd->target, sd->lun);
1671 break;
1672 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1673 dev_info(&h->pdev->dev,
1674 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1675 h->scsi_host->host_no,
1676 sd->bus, sd->target, sd->lun);
1677 break;
1678 case HPSA_LV_PENDING_ENCRYPTION:
1679 dev_info(&h->pdev->dev,
1680 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1681 h->scsi_host->host_no,
1682 sd->bus, sd->target, sd->lun);
1683 break;
1684 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1685 dev_info(&h->pdev->dev,
1686 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1687 h->scsi_host->host_no,
1688 sd->bus, sd->target, sd->lun);
1689 break;
1690 }
1691}
1692
1693
1694
1695
1696
1697static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1698 struct hpsa_scsi_dev_t *dev[], int ndevices,
1699 struct hpsa_scsi_dev_t *logical_drive)
1700{
1701 struct raid_map_data *map = &logical_drive->raid_map;
1702 struct raid_map_disk_data *dd = &map->data[0];
1703 int i, j;
1704 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1705 le16_to_cpu(map->metadata_disks_per_row);
1706 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1707 le16_to_cpu(map->layout_map_count) *
1708 total_disks_per_row;
1709 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1710 total_disks_per_row;
1711 int qdepth;
1712
1713 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1714 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1715
1716 logical_drive->nphysical_disks = nraid_map_entries;
1717
1718 qdepth = 0;
1719 for (i = 0; i < nraid_map_entries; i++) {
1720 logical_drive->phys_disk[i] = NULL;
1721 if (!logical_drive->offload_config)
1722 continue;
1723 for (j = 0; j < ndevices; j++) {
1724 if (dev[j] == NULL)
1725 continue;
1726 if (dev[j]->devtype != TYPE_DISK &&
1727 dev[j]->devtype != TYPE_ZBC)
1728 continue;
1729 if (is_logical_device(dev[j]))
1730 continue;
1731 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1732 continue;
1733
1734 logical_drive->phys_disk[i] = dev[j];
1735 if (i < nphys_disk)
1736 qdepth = min(h->nr_cmds, qdepth +
1737 logical_drive->phys_disk[i]->queue_depth);
1738 break;
1739 }
1740
1741
1742
1743
1744
1745
1746
1747
1748 if (!logical_drive->phys_disk[i]) {
1749 dev_warn(&h->pdev->dev,
1750 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1751 __func__,
1752 h->scsi_host->host_no, logical_drive->bus,
1753 logical_drive->target, logical_drive->lun);
1754 hpsa_turn_off_ioaccel_for_device(logical_drive);
1755 logical_drive->queue_depth = 8;
1756 }
1757 }
1758 if (nraid_map_entries)
1759
1760
1761
1762
1763 logical_drive->queue_depth = qdepth;
1764 else {
1765 if (logical_drive->external)
1766 logical_drive->queue_depth = EXTERNAL_QD;
1767 else
1768 logical_drive->queue_depth = h->nr_cmds;
1769 }
1770}
1771
1772static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1773 struct hpsa_scsi_dev_t *dev[], int ndevices)
1774{
1775 int i;
1776
1777 for (i = 0; i < ndevices; i++) {
1778 if (dev[i] == NULL)
1779 continue;
1780 if (dev[i]->devtype != TYPE_DISK &&
1781 dev[i]->devtype != TYPE_ZBC)
1782 continue;
1783 if (!is_logical_device(dev[i]))
1784 continue;
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1806 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1807 }
1808}
1809
1810static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1811{
1812 int rc = 0;
1813
1814 if (!h->scsi_host)
1815 return 1;
1816
1817 if (is_logical_device(device))
1818 rc = scsi_add_device(h->scsi_host, device->bus,
1819 device->target, device->lun);
1820 else
1821 rc = hpsa_add_sas_device(h->sas_host, device);
1822
1823 return rc;
1824}
1825
1826static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1827 struct hpsa_scsi_dev_t *dev)
1828{
1829 int i;
1830 int count = 0;
1831
1832 for (i = 0; i < h->nr_cmds; i++) {
1833 struct CommandList *c = h->cmd_pool + i;
1834 int refcount = atomic_inc_return(&c->refcount);
1835
1836 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1837 dev->scsi3addr)) {
1838 unsigned long flags;
1839
1840 spin_lock_irqsave(&h->lock, flags);
1841 if (!hpsa_is_cmd_idle(c))
1842 ++count;
1843 spin_unlock_irqrestore(&h->lock, flags);
1844 }
1845
1846 cmd_free(h, c);
1847 }
1848
1849 return count;
1850}
1851
1852#define NUM_WAIT 20
1853static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1854 struct hpsa_scsi_dev_t *device)
1855{
1856 int cmds = 0;
1857 int waits = 0;
1858 int num_wait = NUM_WAIT;
1859
1860 if (device->external)
1861 num_wait = HPSA_EH_PTRAID_TIMEOUT;
1862
1863 while (1) {
1864 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1865 if (cmds == 0)
1866 break;
1867 if (++waits > num_wait)
1868 break;
1869 msleep(1000);
1870 }
1871
1872 if (waits > num_wait) {
1873 dev_warn(&h->pdev->dev,
1874 "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
1875 __func__,
1876 h->scsi_host->host_no,
1877 device->bus, device->target, device->lun, cmds);
1878 }
1879}
1880
1881static void hpsa_remove_device(struct ctlr_info *h,
1882 struct hpsa_scsi_dev_t *device)
1883{
1884 struct scsi_device *sdev = NULL;
1885
1886 if (!h->scsi_host)
1887 return;
1888
1889
1890
1891
1892 device->removed = 1;
1893 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1894
1895 if (is_logical_device(device)) {
1896 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1897 device->target, device->lun);
1898 if (sdev) {
1899 scsi_remove_device(sdev);
1900 scsi_device_put(sdev);
1901 } else {
1902
1903
1904
1905
1906
1907 hpsa_show_dev_msg(KERN_WARNING, h, device,
1908 "didn't find device for removal.");
1909 }
1910 } else {
1911
1912 hpsa_remove_sas_device(device);
1913 }
1914}
1915
1916static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1917 struct hpsa_scsi_dev_t *sd[], int nsds)
1918{
1919
1920
1921
1922
1923 int i, entry, device_change, changes = 0;
1924 struct hpsa_scsi_dev_t *csd;
1925 unsigned long flags;
1926 struct hpsa_scsi_dev_t **added, **removed;
1927 int nadded, nremoved;
1928
1929
1930
1931
1932
1933 spin_lock_irqsave(&h->reset_lock, flags);
1934 if (h->reset_in_progress) {
1935 h->drv_req_rescan = 1;
1936 spin_unlock_irqrestore(&h->reset_lock, flags);
1937 return;
1938 }
1939 spin_unlock_irqrestore(&h->reset_lock, flags);
1940
1941 added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
1942 removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
1943
1944 if (!added || !removed) {
1945 dev_warn(&h->pdev->dev, "out of memory in "
1946 "adjust_hpsa_scsi_table\n");
1947 goto free_and_out;
1948 }
1949
1950 spin_lock_irqsave(&h->devlock, flags);
1951
1952
1953
1954
1955
1956
1957
1958
1959 i = 0;
1960 nremoved = 0;
1961 nadded = 0;
1962 while (i < h->ndevices) {
1963 csd = h->dev[i];
1964 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1965 if (device_change == DEVICE_NOT_FOUND) {
1966 changes++;
1967 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1968 continue;
1969 } else if (device_change == DEVICE_CHANGED) {
1970 changes++;
1971 hpsa_scsi_replace_entry(h, i, sd[entry],
1972 added, &nadded, removed, &nremoved);
1973
1974
1975
1976 sd[entry] = NULL;
1977 } else if (device_change == DEVICE_UPDATED) {
1978 hpsa_scsi_update_entry(h, i, sd[entry]);
1979 }
1980 i++;
1981 }
1982
1983
1984
1985
1986
1987 for (i = 0; i < nsds; i++) {
1988 if (!sd[i])
1989 continue;
1990
1991
1992
1993
1994
1995
1996 if (sd[i]->volume_offline) {
1997 hpsa_show_volume_status(h, sd[i]);
1998 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1999 continue;
2000 }
2001
2002 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
2003 h->ndevices, &entry);
2004 if (device_change == DEVICE_NOT_FOUND) {
2005 changes++;
2006 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
2007 break;
2008 sd[i] = NULL;
2009 } else if (device_change == DEVICE_CHANGED) {
2010
2011 changes++;
2012 dev_warn(&h->pdev->dev,
2013 "device unexpectedly changed.\n");
2014
2015 }
2016 }
2017 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2018
	/*
	 * Now that h->dev[]->phys_disk[] is coherent, enable ioaccel
	 * offload on any logical drives that are ready for it.  The RAID
	 * map should be current by now, and h->dev[] is the list actually
	 * used for I/O requests.
	 */
2027 for (i = 0; i < h->ndevices; i++) {
2028 if (h->dev[i] == NULL)
2029 continue;
2030 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2031 }
2032
2033 spin_unlock_irqrestore(&h->devlock, flags);
2034
	/*
	 * Monitor devices which are in one of several NOT READY states to
	 * be brought online later.  This must be done without holding
	 * h->devlock, so don't touch h->dev[] here.
	 */
2039 for (i = 0; i < nsds; i++) {
2040 if (!sd[i])
2041 continue;
2042 if (sd[i]->volume_offline)
2043 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2044 }
2045
	/*
	 * Don't notify the SCSI midlayer the first time through (or when
	 * there are no changes); scsi_scan_host() will handle the initial
	 * scan later.
	 */
2050 if (!changes)
2051 goto free_and_out;
2052
	/* Notify the SCSI midlayer of removed devices. */
2054 for (i = 0; i < nremoved; i++) {
2055 if (removed[i] == NULL)
2056 continue;
2057 if (removed[i]->expose_device)
2058 hpsa_remove_device(h, removed[i]);
2059 kfree(removed[i]);
2060 removed[i] = NULL;
2061 }
2062
	/* Notify the SCSI midlayer of added devices. */
2064 for (i = 0; i < nadded; i++) {
2065 int rc = 0;
2066
2067 if (added[i] == NULL)
2068 continue;
2069 if (!(added[i]->expose_device))
2070 continue;
2071 rc = hpsa_add_device(h, added[i]);
2072 if (!rc)
2073 continue;
2074 dev_warn(&h->pdev->dev,
2075 "addition failed %d, device not added.", rc);
		/*
		 * The device did not make it into the SCSI midlayer, so
		 * take it back out of h->dev[] and ask for a rescan.
		 */
2079 fixup_botched_add(h, added[i]);
2080 h->drv_req_rescan = 1;
2081 }
2082
2083free_and_out:
2084 kfree(added);
2085 kfree(removed);
2086}
2087
/*
 * Look up a device in h->dev[] by bus/target/lun.
 * Called with h->devlock held.
 */
2092static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2093 int bus, int target, int lun)
2094{
2095 int i;
2096 struct hpsa_scsi_dev_t *sd;
2097
2098 for (i = 0; i < h->ndevices; i++) {
2099 sd = h->dev[i];
2100 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2101 return sd;
2102 }
2103 return NULL;
2104}
2105
2106static int hpsa_slave_alloc(struct scsi_device *sdev)
2107{
2108 struct hpsa_scsi_dev_t *sd = NULL;
2109 unsigned long flags;
2110 struct ctlr_info *h;
2111
2112 h = sdev_to_hba(sdev);
2113 spin_lock_irqsave(&h->devlock, flags);
2114 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2115 struct scsi_target *starget;
2116 struct sas_rphy *rphy;
2117
2118 starget = scsi_target(sdev);
2119 rphy = target_to_rphy(starget);
2120 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2121 if (sd) {
2122 sd->target = sdev_id(sdev);
2123 sd->lun = sdev->lun;
2124 }
2125 }
2126 if (!sd)
2127 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2128 sdev_id(sdev), sdev->lun);
2129
2130 if (sd && sd->expose_device) {
2131 atomic_set(&sd->ioaccel_cmds_out, 0);
2132 sdev->hostdata = sd;
2133 } else
2134 sdev->hostdata = NULL;
2135 spin_unlock_irqrestore(&h->devlock, flags);
2136 return 0;
2137}
2138
/* Configure the scsi device based on the internal per-device structure. */
2140#define CTLR_TIMEOUT (120 * HZ)
2141static int hpsa_slave_configure(struct scsi_device *sdev)
2142{
2143 struct hpsa_scsi_dev_t *sd;
2144 int queue_depth;
2145
2146 sd = sdev->hostdata;
2147 sdev->no_uld_attach = !sd || !sd->expose_device;
2148
2149 if (sd) {
2150 sd->was_removed = 0;
2151 queue_depth = sd->queue_depth != 0 ?
2152 sd->queue_depth : sdev->host->can_queue;
2153 if (sd->external) {
2154 queue_depth = EXTERNAL_QD;
2155 sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
2156 blk_queue_rq_timeout(sdev->request_queue,
2157 HPSA_EH_PTRAID_TIMEOUT);
2158 }
2159 if (is_hba_lunid(sd->scsi3addr)) {
2160 sdev->eh_timeout = CTLR_TIMEOUT;
2161 blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
2162 }
2163 } else {
2164 queue_depth = sdev->host->can_queue;
2165 }
2166
2167 scsi_change_queue_depth(sdev, queue_depth);
2168
2169 return 0;
2170}
2171
2172static void hpsa_slave_destroy(struct scsi_device *sdev)
2173{
2174 struct hpsa_scsi_dev_t *hdev = NULL;
2175
2176 hdev = sdev->hostdata;
2177
2178 if (hdev)
2179 hdev->was_removed = 1;
2180}
2181
2182static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2183{
2184 int i;
2185
2186 if (!h->ioaccel2_cmd_sg_list)
2187 return;
2188 for (i = 0; i < h->nr_cmds; i++) {
2189 kfree(h->ioaccel2_cmd_sg_list[i]);
2190 h->ioaccel2_cmd_sg_list[i] = NULL;
2191 }
2192 kfree(h->ioaccel2_cmd_sg_list);
2193 h->ioaccel2_cmd_sg_list = NULL;
2194}
2195
2196static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2197{
2198 int i;
2199
2200 if (h->chainsize <= 0)
2201 return 0;
2202
2203 h->ioaccel2_cmd_sg_list =
2204 kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
2205 GFP_KERNEL);
2206 if (!h->ioaccel2_cmd_sg_list)
2207 return -ENOMEM;
2208 for (i = 0; i < h->nr_cmds; i++) {
2209 h->ioaccel2_cmd_sg_list[i] =
2210 kmalloc_array(h->maxsgentries,
2211 sizeof(*h->ioaccel2_cmd_sg_list[i]),
2212 GFP_KERNEL);
2213 if (!h->ioaccel2_cmd_sg_list[i])
2214 goto clean;
2215 }
2216 return 0;
2217
2218clean:
2219 hpsa_free_ioaccel2_sg_chain_blocks(h);
2220 return -ENOMEM;
2221}
2222
2223static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2224{
2225 int i;
2226
2227 if (!h->cmd_sg_list)
2228 return;
2229 for (i = 0; i < h->nr_cmds; i++) {
2230 kfree(h->cmd_sg_list[i]);
2231 h->cmd_sg_list[i] = NULL;
2232 }
2233 kfree(h->cmd_sg_list);
2234 h->cmd_sg_list = NULL;
2235}
2236
2237static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2238{
2239 int i;
2240
2241 if (h->chainsize <= 0)
2242 return 0;
2243
2244 h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
2245 GFP_KERNEL);
2246 if (!h->cmd_sg_list)
2247 return -ENOMEM;
2248
2249 for (i = 0; i < h->nr_cmds; i++) {
2250 h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
2251 sizeof(*h->cmd_sg_list[i]),
2252 GFP_KERNEL);
2253 if (!h->cmd_sg_list[i])
2254 goto clean;
2255
2256 }
2257 return 0;
2258
2259clean:
2260 hpsa_free_sg_chain_blocks(h);
2261 return -ENOMEM;
2262}
2263
2264static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2265 struct io_accel2_cmd *cp, struct CommandList *c)
2266{
2267 struct ioaccel2_sg_element *chain_block;
2268 u64 temp64;
2269 u32 chain_size;
2270
2271 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2272 chain_size = le32_to_cpu(cp->sg[0].length);
2273 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2274 DMA_TO_DEVICE);
2275 if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
2277 cp->sg->address = 0;
2278 return -1;
2279 }
2280 cp->sg->address = cpu_to_le64(temp64);
2281 return 0;
2282}
2283
2284static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2285 struct io_accel2_cmd *cp)
2286{
2287 struct ioaccel2_sg_element *chain_sg;
2288 u64 temp64;
2289 u32 chain_size;
2290
2291 chain_sg = cp->sg;
2292 temp64 = le64_to_cpu(chain_sg->address);
2293 chain_size = le32_to_cpu(cp->sg[0].length);
2294 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2295}
2296
2297static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2298 struct CommandList *c)
2299{
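	/*
	 * When a request needs more SG entries than fit in the command
	 * itself, the last embedded descriptor is turned into a "chain"
	 * descriptor pointing at an external block that holds the
	 * remaining (SGTotal - max_cmd_sg_entries) entries.
	 */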
2300 struct SGDescriptor *chain_sg, *chain_block;
2301 u64 temp64;
2302 u32 chain_len;
2303
2304 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2305 chain_block = h->cmd_sg_list[c->cmdindex];
2306 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2307 chain_len = sizeof(*chain_sg) *
2308 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2309 chain_sg->Len = cpu_to_le32(chain_len);
2310 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2311 DMA_TO_DEVICE);
2312 if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
2314 chain_sg->Addr = cpu_to_le64(0);
2315 return -1;
2316 }
2317 chain_sg->Addr = cpu_to_le64(temp64);
2318 return 0;
2319}
2320
2321static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2322 struct CommandList *c)
2323{
2324 struct SGDescriptor *chain_sg;
2325
2326 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2327 return;
2328
2329 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2330 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2331 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2332}
2333
/*
 * Decode the various types of errors on the ioaccel2 path.  Return 1 for
 * any error that should trigger a retry down the normal RAID path, 0 for
 * errors that do not require a retry.
 */
2339static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2340 struct CommandList *c,
2341 struct scsi_cmnd *cmd,
2342 struct io_accel2_cmd *c2,
2343 struct hpsa_scsi_dev_t *dev)
2344{
2345 int data_len;
2346 int retry = 0;
2347 u32 ioaccel2_resid = 0;
2348
2349 switch (c2->error_data.serv_response) {
2350 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2351 switch (c2->error_data.status) {
2352 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2353 if (cmd)
2354 cmd->result = 0;
2355 break;
2356 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2357 cmd->result |= SAM_STAT_CHECK_CONDITION;
2358 if (c2->error_data.data_present !=
2359 IOACCEL2_SENSE_DATA_PRESENT) {
2360 memset(cmd->sense_buffer, 0,
2361 SCSI_SENSE_BUFFERSIZE);
2362 break;
2363 }
			/* copy the sense data */
2365 data_len = c2->error_data.sense_data_len;
2366 if (data_len > SCSI_SENSE_BUFFERSIZE)
2367 data_len = SCSI_SENSE_BUFFERSIZE;
2368 if (data_len > sizeof(c2->error_data.sense_data_buff))
2369 data_len =
2370 sizeof(c2->error_data.sense_data_buff);
2371 memcpy(cmd->sense_buffer,
2372 c2->error_data.sense_data_buff, data_len);
2373 retry = 1;
2374 break;
2375 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2376 retry = 1;
2377 break;
2378 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2379 retry = 1;
2380 break;
2381 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2382 retry = 1;
2383 break;
2384 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2385 retry = 1;
2386 break;
2387 default:
2388 retry = 1;
2389 break;
2390 }
2391 break;
2392 case IOACCEL2_SERV_RESPONSE_FAILURE:
2393 switch (c2->error_data.status) {
2394 case IOACCEL2_STATUS_SR_IO_ERROR:
2395 case IOACCEL2_STATUS_SR_IO_ABORTED:
2396 case IOACCEL2_STATUS_SR_OVERRUN:
2397 retry = 1;
2398 break;
2399 case IOACCEL2_STATUS_SR_UNDERRUN:
2400 cmd->result = (DID_OK << 16);
2401 ioaccel2_resid = get_unaligned_le32(
2402 &c2->error_data.resid_cnt[0]);
2403 scsi_set_resid(cmd, ioaccel2_resid);
2404 break;
2405 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2406 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2407 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA-mode disk disappear?  We will
			 * eventually get a state change event from the
			 * controller, but in the meantime tell the OS the
			 * disk is gone and stop sending it I/O, so a
			 * re-inserted disk can get the same device node.
			 */
2416 if (dev->physical_device && dev->expose_device) {
2417 cmd->result = DID_NO_CONNECT << 16;
2418 dev->removed = 1;
2419 h->drv_req_rescan = 1;
2420 dev_warn(&h->pdev->dev,
2421 "%s: device is gone!\n", __func__);
2422 } else
				/*
				 * Retry down the normal RAID path; the
				 * controller will raise an event that
				 * triggers a rescan regardless.
				 */
2428 retry = 1;
2429 break;
2430 default:
2431 retry = 1;
2432 }
2433 break;
2434 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2435 break;
2436 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2437 break;
2438 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2439 retry = 1;
2440 break;
2441 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2442 break;
2443 default:
2444 retry = 1;
2445 break;
2446 }
2447
2448 if (dev->in_reset)
2449 retry = 0;
2450
2451 return retry;
2452}
2453
2454static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2455 struct CommandList *c)
2456{
2457 struct hpsa_scsi_dev_t *dev = c->device;
2458
	/*
	 * Reset c->scsi_cmd here so that the reset handler will know this
	 * command has completed, then check whether the reset handler is
	 * waiting on this device and, if so, wake it.
	 */
2464 c->scsi_cmd = SCSI_CMD_IDLE;
2465 mb();
2466 if (dev) {
2467 atomic_dec(&dev->commands_outstanding);
2468 if (dev->in_reset &&
2469 atomic_read(&dev->commands_outstanding) <= 0)
2470 wake_up_all(&h->event_sync_wait_queue);
2471 }
2472}
2473
2474static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2475 struct CommandList *c)
2476{
2477 hpsa_cmd_resolve_events(h, c);
2478 cmd_tagged_free(h, c);
2479}
2480
2481static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2482 struct CommandList *c, struct scsi_cmnd *cmd)
2483{
2484 hpsa_cmd_resolve_and_free(h, c);
2485 if (cmd && cmd->scsi_done)
2486 cmd->scsi_done(cmd);
2487}
2488
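/*
 * Queue the command for resubmission from process context; the resubmit
 * worker retries it down the normal RAID path on the same CPU.
 */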
2489static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2490{
2491 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2492 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2493}
2494
2495static void process_ioaccel2_completion(struct ctlr_info *h,
2496 struct CommandList *c, struct scsi_cmnd *cmd,
2497 struct hpsa_scsi_dev_t *dev)
2498{
2499 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2500
2501
2502 if (likely(c2->error_data.serv_response == 0 &&
2503 c2->error_data.status == 0)) {
2504 cmd->result = 0;
2505 return hpsa_cmd_free_and_done(h, c, cmd);
2506 }
2507
	/*
	 * Any RAID offload error results in a retry which will use the
	 * normal I/O path, so the controller can handle whatever is wrong.
	 */
2513 if (is_logical_device(dev) &&
2514 c2->error_data.serv_response ==
2515 IOACCEL2_SERV_RESPONSE_FAILURE) {
2516 if (c2->error_data.status ==
2517 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2518 hpsa_turn_off_ioaccel_for_device(dev);
2519 }
2520
2521 if (dev->in_reset) {
2522 cmd->result = DID_RESET << 16;
2523 return hpsa_cmd_free_and_done(h, c, cmd);
2524 }
2525
2526 return hpsa_retry_cmd(h, c);
2527 }
2528
2529 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2530 return hpsa_retry_cmd(h, c);
2531
2532 return hpsa_cmd_free_and_done(h, c, cmd);
2533}
2534
2535
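/*
 * Translate the controller's task management (TMF) status into 0 for
 * success, or a negative value that callers treat as an error.
 */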
2536static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2537 struct CommandList *cp)
2538{
2539 u8 tmf_status = cp->err_info->ScsiStatus;
2540
2541 switch (tmf_status) {
2542 case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never actually happens; the controller
		 * reports this case with ei->CommandStatus == 0 instead.
		 */
2547 case CISS_TMF_SUCCESS:
2548 return 0;
2549 case CISS_TMF_INVALID_FRAME:
2550 case CISS_TMF_NOT_SUPPORTED:
2551 case CISS_TMF_FAILED:
2552 case CISS_TMF_WRONG_LUN:
2553 case CISS_TMF_OVERLAPPED_TAG:
2554 break;
2555 default:
2556 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2557 tmf_status);
2558 break;
2559 }
2560 return -tmf_status;
2561}
2562
2563static void complete_scsi_command(struct CommandList *cp)
2564{
2565 struct scsi_cmnd *cmd;
2566 struct ctlr_info *h;
2567 struct ErrorInfo *ei;
2568 struct hpsa_scsi_dev_t *dev;
2569 struct io_accel2_cmd *c2;
2570
2571 u8 sense_key;
2572 u8 asc;
2573 u8 ascq;
2574 unsigned long sense_data_size;
2575
2576 ei = cp->err_info;
2577 cmd = cp->scsi_cmd;
2578 h = cp->h;
2579
2580 if (!cmd->device) {
2581 cmd->result = DID_NO_CONNECT << 16;
2582 return hpsa_cmd_free_and_done(h, cp, cmd);
2583 }
2584
2585 dev = cmd->device->hostdata;
2586 if (!dev) {
2587 cmd->result = DID_NO_CONNECT << 16;
2588 return hpsa_cmd_free_and_done(h, cp, cmd);
2589 }
2590 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2591
2592 scsi_dma_unmap(cmd);
2593 if ((cp->cmd_type == CMD_SCSI) &&
2594 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2595 hpsa_unmap_sg_chain_block(h, cp);
2596
2597 if ((cp->cmd_type == CMD_IOACCEL2) &&
2598 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2599 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2600
2601 cmd->result = (DID_OK << 16);

	/* The SCSI command has already been cleaned up in the midlayer. */
2604 if (dev->was_removed) {
2605 hpsa_cmd_resolve_and_free(h, cp);
2606 return;
2607 }
2608
2609 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2610 if (dev->physical_device && dev->expose_device &&
2611 dev->removed) {
2612 cmd->result = DID_NO_CONNECT << 16;
2613 return hpsa_cmd_free_and_done(h, cp, cmd);
2614 }
2615 if (likely(cp->phys_disk != NULL))
2616 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2617 }
2618
	/*
	 * Check for controller lockup here; the status may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds().
	 */
2624 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2625
2626 cmd->result = DID_NO_CONNECT << 16;
2627 return hpsa_cmd_free_and_done(h, cp, cmd);
2628 }
2629
2630 if (cp->cmd_type == CMD_IOACCEL2)
2631 return process_ioaccel2_completion(h, cp, cmd, dev);
2632
2633 scsi_set_resid(cmd, ei->ResidualCnt);
2634 if (ei->CommandStatus == 0)
2635 return hpsa_cmd_free_and_done(h, cp, cmd);
2636
	/*
	 * For I/O accelerator commands, copy over some fields to the
	 * normal CISS header used below for error handling.
	 */
2640 if (cp->cmd_type == CMD_IOACCEL1) {
2641 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2642 cp->Header.SGList = scsi_sg_count(cmd);
2643 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2644 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2645 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2646 cp->Header.tag = c->tag;
2647 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2648 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
		/*
		 * Any RAID offload error results in a retry which will use
		 * the normal I/O path, so the controller can handle
		 * whatever is wrong.
		 */
2654 if (is_logical_device(dev)) {
2655 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2656 dev->offload_enabled = 0;
2657 return hpsa_retry_cmd(h, cp);
2658 }
2659 }
2660
2661
2662 switch (ei->CommandStatus) {
2663
2664 case CMD_TARGET_STATUS:
2665 cmd->result |= ei->ScsiStatus;
2666
2667 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2668 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2669 else
2670 sense_data_size = sizeof(ei->SenseInfo);
2671 if (ei->SenseLen < sense_data_size)
2672 sense_data_size = ei->SenseLen;
2673 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2674 if (ei->ScsiStatus)
2675 decode_sense_data(ei->SenseInfo, sense_data_size,
2676 &sense_key, &asc, &ascq);
2677 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2678 switch (sense_key) {
2679 case ABORTED_COMMAND:
2680 cmd->result |= DID_SOFT_ERROR << 16;
2681 break;
2682 case UNIT_ATTENTION:
2683 if (asc == 0x3F && ascq == 0x0E)
2684 h->drv_req_rescan = 1;
2685 break;
2686 case ILLEGAL_REQUEST:
2687 if (asc == 0x25 && ascq == 0x00) {
2688 dev->removed = 1;
2689 cmd->result = DID_NO_CONNECT << 16;
2690 }
2691 break;
2692 }
2693 break;
2694 }

		/*
		 * The problem was not a check condition; pass whatever
		 * SCSI status we got up to the upper layers.
		 */
2698 if (ei->ScsiStatus) {
2699 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2700 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2701 "Returning result: 0x%x\n",
2702 cp, ei->ScsiStatus,
2703 sense_key, asc, ascq,
2704 cmd->result);
2705 } else {
2706 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/*
			 * Ordinarily this case should never happen, but a
			 * bug in some released firmware revisions allows it
			 * (for example if a backplane loses power while a
			 * tape drive is attached).  Assume it is a fatal
			 * error and report it as a selection timeout, the
			 * most common and sufficiently severe reason for
			 * this to occur.
			 */

2721 cmd->result = DID_NO_CONNECT << 16;
2722 }
2723 break;
2724
2725 case CMD_DATA_UNDERRUN:
2726 break;
2727 case CMD_DATA_OVERRUN:
2728 dev_warn(&h->pdev->dev,
2729 "CDB %16phN data overrun\n", cp->Request.CDB);
2730 break;
2731 case CMD_INVALID: {
		/*
		 * We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive and then try to access
		 * it.  Unfortunately, it means any other CMD_INVALID (e.g.
		 * a driver bug) is also interpreted as a missing target.
		 */
2740 cmd->result = DID_NO_CONNECT << 16;
2741 }
2742 break;
2743 case CMD_PROTOCOL_ERR:
2744 cmd->result = DID_ERROR << 16;
2745 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2746 cp->Request.CDB);
2747 break;
2748 case CMD_HARDWARE_ERR:
2749 cmd->result = DID_ERROR << 16;
2750 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2751 cp->Request.CDB);
2752 break;
2753 case CMD_CONNECTION_LOST:
2754 cmd->result = DID_ERROR << 16;
2755 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2756 cp->Request.CDB);
2757 break;
2758 case CMD_ABORTED:
2759 cmd->result = DID_ABORT << 16;
2760 break;
2761 case CMD_ABORT_FAILED:
2762 cmd->result = DID_ERROR << 16;
2763 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2764 cp->Request.CDB);
2765 break;
2766 case CMD_UNSOLICITED_ABORT:
2767 cmd->result = DID_SOFT_ERROR << 16;
2768 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2769 cp->Request.CDB);
2770 break;
2771 case CMD_TIMEOUT:
2772 cmd->result = DID_TIME_OUT << 16;
2773 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2774 cp->Request.CDB);
2775 break;
2776 case CMD_UNABORTABLE:
2777 cmd->result = DID_ERROR << 16;
2778 dev_warn(&h->pdev->dev, "Command unabortable\n");
2779 break;
2780 case CMD_TMF_STATUS:
2781 if (hpsa_evaluate_tmf_status(h, cp))
2782 cmd->result = DID_ERROR << 16;
2783 break;
2784 case CMD_IOACCEL_DISABLED:
		/*
		 * This only handles the direct pass-through case, since
		 * RAID offload errors are handled above.  Just attempt a
		 * retry.
		 */
2788 cmd->result = DID_SOFT_ERROR << 16;
2789 dev_warn(&h->pdev->dev,
2790 "cp %p had HP SSD Smart Path error\n", cp);
2791 break;
2792 default:
2793 cmd->result = DID_ERROR << 16;
2794 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2795 cp, ei->CommandStatus);
2796 }
2797
2798 return hpsa_cmd_free_and_done(h, cp, cmd);
2799}
2800
2801static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2802 int sg_used, enum dma_data_direction data_direction)
2803{
2804 int i;
2805
2806 for (i = 0; i < sg_used; i++)
2807 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2808 le32_to_cpu(c->SG[i].Len),
2809 data_direction);
2810}
2811
2812static int hpsa_map_one(struct pci_dev *pdev,
2813 struct CommandList *cp,
2814 unsigned char *buf,
2815 size_t buflen,
2816 enum dma_data_direction data_direction)
2817{
2818 u64 addr64;
2819
2820 if (buflen == 0 || data_direction == DMA_NONE) {
2821 cp->Header.SGList = 0;
2822 cp->Header.SGTotal = cpu_to_le16(0);
2823 return 0;
2824 }
2825
2826 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2827 if (dma_mapping_error(&pdev->dev, addr64)) {
2828
2829 cp->Header.SGList = 0;
2830 cp->Header.SGTotal = cpu_to_le16(0);
2831 return -1;
2832 }
2833 cp->SG[0].Addr = cpu_to_le64(addr64);
2834 cp->SG[0].Len = cpu_to_le32(buflen);
2835 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
2836 cp->Header.SGList = 1;
2837 cp->Header.SGTotal = cpu_to_le16(1);
2838 return 0;
2839}
2840
2841#define NO_TIMEOUT ((unsigned long) -1)
2842#define DEFAULT_TIMEOUT 30000
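/*
 * Issue a driver-initiated command and sleep on a completion until the
 * controller finishes it.  NO_TIMEOUT waits indefinitely; otherwise
 * -ETIMEDOUT is returned if the command does not complete in time.
 */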
2843static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2844 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2845{
2846 DECLARE_COMPLETION_ONSTACK(wait);
2847
2848 c->waiting = &wait;
2849 __enqueue_cmd_and_start_io(h, c, reply_queue);
2850 if (timeout_msecs == NO_TIMEOUT) {
2851
2852 wait_for_completion_io(&wait);
2853 return IO_OK;
2854 }
2855 if (!wait_for_completion_io_timeout(&wait,
2856 msecs_to_jiffies(timeout_msecs))) {
2857 dev_warn(&h->pdev->dev, "Command timed out.\n");
2858 return -ETIMEDOUT;
2859 }
2860 return IO_OK;
2861}
2862
2863static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2864 int reply_queue, unsigned long timeout_msecs)
2865{
2866 if (unlikely(lockup_detected(h))) {
2867 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2868 return IO_OK;
2869 }
2870 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2871}
2872
2873static u32 lockup_detected(struct ctlr_info *h)
2874{
2875 int cpu;
2876 u32 rc, *lockup_detected;
2877
2878 cpu = get_cpu();
2879 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2880 rc = *lockup_detected;
2881 put_cpu();
2882 return rc;
2883}
2884
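/*
 * Retry driver-initiated commands that come back with unit attention or
 * busy status, backing off between attempts (10 ms, doubling up to
 * roughly 1 s), for at most MAX_DRIVER_CMD_RETRIES tries.
 */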
2885#define MAX_DRIVER_CMD_RETRIES 25
2886static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2887 struct CommandList *c, enum dma_data_direction data_direction,
2888 unsigned long timeout_msecs)
2889{
2890 int backoff_time = 10, retry_count = 0;
2891 int rc;
2892
2893 do {
2894 memset(c->err_info, 0, sizeof(*c->err_info));
2895 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2896 timeout_msecs);
2897 if (rc)
2898 break;
2899 retry_count++;
2900 if (retry_count > 3) {
2901 msleep(backoff_time);
2902 if (backoff_time < 1000)
2903 backoff_time *= 2;
2904 }
2905 } while ((check_for_unit_attention(h, c) ||
2906 check_for_busy(h, c)) &&
2907 retry_count <= MAX_DRIVER_CMD_RETRIES);
2908 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2909 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2910 rc = -EIO;
2911 return rc;
2912}
2913
2914static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2915 struct CommandList *c)
2916{
2917 const u8 *cdb = c->Request.CDB;
2918 const u8 *lun = c->Header.LUN.LunAddrBytes;
2919
2920 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2921 txt, lun, cdb);
2922}
2923
2924static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2925 struct CommandList *cp)
2926{
2927 const struct ErrorInfo *ei = cp->err_info;
2928 struct device *d = &cp->h->pdev->dev;
2929 u8 sense_key, asc, ascq;
2930 int sense_len;
2931
2932 switch (ei->CommandStatus) {
2933 case CMD_TARGET_STATUS:
2934 if (ei->SenseLen > sizeof(ei->SenseInfo))
2935 sense_len = sizeof(ei->SenseInfo);
2936 else
2937 sense_len = ei->SenseLen;
2938 decode_sense_data(ei->SenseInfo, sense_len,
2939 &sense_key, &asc, &ascq);
2940 hpsa_print_cmd(h, "SCSI status", cp);
2941 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2942 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2943 sense_key, asc, ascq);
2944 else
2945 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2946 if (ei->ScsiStatus == 0)
2947 dev_warn(d, "SCSI status is abnormally zero. "
2948 "(probably indicates selection timeout "
2949 "reported incorrectly due to a known "
2950 "firmware bug, circa July, 2001.)\n");
2951 break;
2952 case CMD_DATA_UNDERRUN:
2953 break;
2954 case CMD_DATA_OVERRUN:
2955 hpsa_print_cmd(h, "overrun condition", cp);
2956 break;
2957 case CMD_INVALID: {
2958
2959
2960
2961 hpsa_print_cmd(h, "invalid command", cp);
2962 dev_warn(d, "probably means device no longer present\n");
2963 }
2964 break;
2965 case CMD_PROTOCOL_ERR:
2966 hpsa_print_cmd(h, "protocol error", cp);
2967 break;
2968 case CMD_HARDWARE_ERR:
2969 hpsa_print_cmd(h, "hardware error", cp);
2970 break;
2971 case CMD_CONNECTION_LOST:
2972 hpsa_print_cmd(h, "connection lost", cp);
2973 break;
2974 case CMD_ABORTED:
2975 hpsa_print_cmd(h, "aborted", cp);
2976 break;
2977 case CMD_ABORT_FAILED:
2978 hpsa_print_cmd(h, "abort failed", cp);
2979 break;
2980 case CMD_UNSOLICITED_ABORT:
2981 hpsa_print_cmd(h, "unsolicited abort", cp);
2982 break;
2983 case CMD_TIMEOUT:
2984 hpsa_print_cmd(h, "timed out", cp);
2985 break;
2986 case CMD_UNABORTABLE:
2987 hpsa_print_cmd(h, "unabortable", cp);
2988 break;
2989 case CMD_CTLR_LOCKUP:
2990 hpsa_print_cmd(h, "controller lockup detected", cp);
2991 break;
2992 default:
2993 hpsa_print_cmd(h, "unknown status", cp);
2994 dev_warn(d, "Unknown command status %x\n",
2995 ei->CommandStatus);
2996 }
2997}
2998
2999static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
3000 u8 page, u8 *buf, size_t bufsize)
3001{
3002 int rc = IO_OK;
3003 struct CommandList *c;
3004 struct ErrorInfo *ei;
3005
3006 c = cmd_alloc(h);
3007 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
3008 page, scsi3addr, TYPE_CMD)) {
3009 rc = -1;
3010 goto out;
3011 }
3012 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3013 NO_TIMEOUT);
3014 if (rc)
3015 goto out;
3016 ei = c->err_info;
3017 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3018 hpsa_scsi_interpret_error(h, c);
3019 rc = -1;
3020 }
3021out:
3022 cmd_free(h, c);
3023 return rc;
3024}
3025
3026static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
3027 u8 *scsi3addr)
3028{
3029 u8 *buf;
3030 u64 sa = 0;
3031 int rc = 0;
3032
3033 buf = kzalloc(1024, GFP_KERNEL);
3034 if (!buf)
3035 return 0;
3036
3037 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3038 buf, 1024);
3039
3040 if (rc)
3041 goto out;
3042
3043 sa = get_unaligned_be64(buf+12);
3044
3045out:
3046 kfree(buf);
3047 return sa;
3048}
3049
3050static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3051 u16 page, unsigned char *buf,
3052 unsigned char bufsize)
3053{
3054 int rc = IO_OK;
3055 struct CommandList *c;
3056 struct ErrorInfo *ei;
3057
3058 c = cmd_alloc(h);
3059
3060 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3061 page, scsi3addr, TYPE_CMD)) {
3062 rc = -1;
3063 goto out;
3064 }
3065 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3066 NO_TIMEOUT);
3067 if (rc)
3068 goto out;
3069 ei = c->err_info;
3070 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3071 hpsa_scsi_interpret_error(h, c);
3072 rc = -1;
3073 }
3074out:
3075 cmd_free(h, c);
3076 return rc;
3077}
3078
3079static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3080 u8 reset_type, int reply_queue)
3081{
3082 int rc = IO_OK;
3083 struct CommandList *c;
3084 struct ErrorInfo *ei;
3085
3086 c = cmd_alloc(h);
3087 c->device = dev;
3088
	/* fill_cmd can't fail here, no data buffer to map */
3090 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
3091 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3092 if (rc) {
3093 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3094 goto out;
3095 }
3096
3097
3098 ei = c->err_info;
3099 if (ei->CommandStatus != 0) {
3100 hpsa_scsi_interpret_error(h, c);
3101 rc = -1;
3102 }
3103out:
3104 cmd_free(h, c);
3105 return rc;
3106}
3107
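/*
 * Return true if command c is still outstanding against the given device;
 * used when counting or draining commands before a device reset or
 * removal.
 */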
3108static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3109 struct hpsa_scsi_dev_t *dev,
3110 unsigned char *scsi3addr)
3111{
3112 int i;
3113 bool match = false;
3114 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3115 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3116
3117 if (hpsa_is_cmd_idle(c))
3118 return false;
3119
3120 switch (c->cmd_type) {
3121 case CMD_SCSI:
3122 case CMD_IOCTL_PEND:
3123 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3124 sizeof(c->Header.LUN.LunAddrBytes));
3125 break;
3126
3127 case CMD_IOACCEL1:
3128 case CMD_IOACCEL2:
3129 if (c->phys_disk == dev) {
3130
3131 match = true;
3132 } else {
			/*
			 * Possibly a RAID-mapped ioaccel command: check
			 * whether any physical disk backing this logical
			 * volume is the command's target.
			 */
3137 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3138
3139
3140
3141
3142 match = dev->phys_disk[i] == c->phys_disk;
3143 }
3144 }
3145 break;
3146
3147 case IOACCEL2_TMF:
3148 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3149 match = dev->phys_disk[i]->ioaccel_handle ==
3150 le32_to_cpu(ac->it_nexus);
3151 }
3152 break;
3153
3154 case 0:
3155 match = false;
3156 break;
3157
3158 default:
3159 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3160 c->cmd_type);
3161 BUG();
3162 }
3163
3164 return match;
3165}
3166
3167static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3168 u8 reset_type, int reply_queue)
3169{
3170 int rc = 0;
3171
	/* We can really only handle one reset at a time. */
3173 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3174 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3175 return -EINTR;
3176 }
3177
3178 rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
3179 if (!rc) {
3180
3181 atomic_dec(&dev->commands_outstanding);
3182 wait_event(h->event_sync_wait_queue,
3183 atomic_read(&dev->commands_outstanding) <= 0 ||
3184 lockup_detected(h));
3185 }
3186
3187 if (unlikely(lockup_detected(h))) {
3188 dev_warn(&h->pdev->dev,
3189 "Controller lockup detected during reset wait\n");
3190 rc = -ENODEV;
3191 }
3192
3193 if (!rc)
3194 rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
3195
3196 mutex_unlock(&h->reset_mutex);
3197 return rc;
3198}
3199
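/*
 * Read the logical volume geometry VPD page and take the RAID level from
 * byte 8 of the response; default to RAID_UNKNOWN if the page is not
 * supported or the value is out of range.
 */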
3200static void hpsa_get_raid_level(struct ctlr_info *h,
3201 unsigned char *scsi3addr, unsigned char *raid_level)
3202{
3203 int rc;
3204 unsigned char *buf;
3205
3206 *raid_level = RAID_UNKNOWN;
3207 buf = kzalloc(64, GFP_KERNEL);
3208 if (!buf)
3209 return;
3210
3211 if (!hpsa_vpd_page_supported(h, scsi3addr,
3212 HPSA_VPD_LV_DEVICE_GEOMETRY))
3213 goto exit;
3214
3215 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3216 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3217
3218 if (rc == 0)
3219 *raid_level = buf[8];
3220 if (*raid_level > RAID_UNKNOWN)
3221 *raid_level = RAID_UNKNOWN;
3222exit:
3223 kfree(buf);
3224 return;
3225}
3226
3227#define HPSA_MAP_DEBUG
3228#ifdef HPSA_MAP_DEBUG
3229static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3230 struct raid_map_data *map_buff)
3231{
3232 struct raid_map_disk_data *dd = &map_buff->data[0];
3233 int map, row, col;
3234 u16 map_cnt, row_cnt, disks_per_row;
3235
3236 if (rc != 0)
3237 return;
3238
	/* Show details only if debugging has been activated. */
3240 if (h->raid_offload_debug < 2)
3241 return;
3242
3243 dev_info(&h->pdev->dev, "structure_size = %u\n",
3244 le32_to_cpu(map_buff->structure_size));
3245 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3246 le32_to_cpu(map_buff->volume_blk_size));
3247 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3248 le64_to_cpu(map_buff->volume_blk_cnt));
3249 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3250 map_buff->phys_blk_shift);
3251 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3252 map_buff->parity_rotation_shift);
3253 dev_info(&h->pdev->dev, "strip_size = %u\n",
3254 le16_to_cpu(map_buff->strip_size));
3255 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3256 le64_to_cpu(map_buff->disk_starting_blk));
3257 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3258 le64_to_cpu(map_buff->disk_blk_cnt));
3259 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3260 le16_to_cpu(map_buff->data_disks_per_row));
3261 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3262 le16_to_cpu(map_buff->metadata_disks_per_row));
3263 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3264 le16_to_cpu(map_buff->row_cnt));
3265 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3266 le16_to_cpu(map_buff->layout_map_count));
3267 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3268 le16_to_cpu(map_buff->flags));
3269 dev_info(&h->pdev->dev, "encryption = %s\n",
3270 le16_to_cpu(map_buff->flags) &
3271 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3272 dev_info(&h->pdev->dev, "dekindex = %u\n",
3273 le16_to_cpu(map_buff->dekindex));
3274 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3275 for (map = 0; map < map_cnt; map++) {
3276 dev_info(&h->pdev->dev, "Map%u:\n", map);
3277 row_cnt = le16_to_cpu(map_buff->row_cnt);
3278 for (row = 0; row < row_cnt; row++) {
3279 dev_info(&h->pdev->dev, " Row%u:\n", row);
3280 disks_per_row =
3281 le16_to_cpu(map_buff->data_disks_per_row);
3282 for (col = 0; col < disks_per_row; col++, dd++)
3283 dev_info(&h->pdev->dev,
3284 " D%02u: h=0x%04x xor=%u,%u\n",
3285 col, dd->ioaccel_handle,
3286 dd->xor_mult[0], dd->xor_mult[1]);
3287 disks_per_row =
3288 le16_to_cpu(map_buff->metadata_disks_per_row);
3289 for (col = 0; col < disks_per_row; col++, dd++)
3290 dev_info(&h->pdev->dev,
3291 " M%02u: h=0x%04x xor=%u,%u\n",
3292 col, dd->ioaccel_handle,
3293 dd->xor_mult[0], dd->xor_mult[1]);
3294 }
3295 }
3296}
3297#else
3298static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3299 __attribute__((unused)) int rc,
3300 __attribute__((unused)) struct raid_map_data *map_buff)
3301{
3302}
3303#endif
3304
3305static int hpsa_get_raid_map(struct ctlr_info *h,
3306 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3307{
3308 int rc = 0;
3309 struct CommandList *c;
3310 struct ErrorInfo *ei;
3311
3312 c = cmd_alloc(h);
3313
3314 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3315 sizeof(this_device->raid_map), 0,
3316 scsi3addr, TYPE_CMD)) {
3317 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3318 cmd_free(h, c);
3319 return -1;
3320 }
3321 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3322 NO_TIMEOUT);
3323 if (rc)
3324 goto out;
3325 ei = c->err_info;
3326 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3327 hpsa_scsi_interpret_error(h, c);
3328 rc = -1;
3329 goto out;
3330 }
3331 cmd_free(h, c);
3332
3333
3334 if (le32_to_cpu(this_device->raid_map.structure_size) >
3335 sizeof(this_device->raid_map)) {
3336 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3337 rc = -1;
3338 }
3339 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3340 return rc;
3341out:
3342 cmd_free(h, c);
3343 return rc;
3344}
3345
3346static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3347 unsigned char scsi3addr[], u16 bmic_device_index,
3348 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3349{
3350 int rc = IO_OK;
3351 struct CommandList *c;
3352 struct ErrorInfo *ei;
3353
3354 c = cmd_alloc(h);
3355
3356 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3357 0, RAID_CTLR_LUNID, TYPE_CMD);
3358 if (rc)
3359 goto out;
3360
3361 c->Request.CDB[2] = bmic_device_index & 0xff;
3362 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3363
3364 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3365 NO_TIMEOUT);
3366 if (rc)
3367 goto out;
3368 ei = c->err_info;
3369 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3370 hpsa_scsi_interpret_error(h, c);
3371 rc = -1;
3372 }
3373out:
3374 cmd_free(h, c);
3375 return rc;
3376}
3377
3378static int hpsa_bmic_id_controller(struct ctlr_info *h,
3379 struct bmic_identify_controller *buf, size_t bufsize)
3380{
3381 int rc = IO_OK;
3382 struct CommandList *c;
3383 struct ErrorInfo *ei;
3384
3385 c = cmd_alloc(h);
3386
3387 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3388 0, RAID_CTLR_LUNID, TYPE_CMD);
3389 if (rc)
3390 goto out;
3391
3392 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3393 NO_TIMEOUT);
3394 if (rc)
3395 goto out;
3396 ei = c->err_info;
3397 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3398 hpsa_scsi_interpret_error(h, c);
3399 rc = -1;
3400 }
3401out:
3402 cmd_free(h, c);
3403 return rc;
3404}
3405
3406static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3407 unsigned char scsi3addr[], u16 bmic_device_index,
3408 struct bmic_identify_physical_device *buf, size_t bufsize)
3409{
3410 int rc = IO_OK;
3411 struct CommandList *c;
3412 struct ErrorInfo *ei;
3413
3414 c = cmd_alloc(h);
3415 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3416 0, RAID_CTLR_LUNID, TYPE_CMD);
3417 if (rc)
3418 goto out;
3419
3420 c->Request.CDB[2] = bmic_device_index & 0xff;
3421 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3422
3423 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3424 NO_TIMEOUT);
3425 ei = c->err_info;
3426 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3427 hpsa_scsi_interpret_error(h, c);
3428 rc = -1;
3429 }
3430out:
3431 cmd_free(h, c);
3432
3433 return rc;
3434}
3435
/*
 * Get enclosure information:
 *	rlep/rle_index	- used to compute the BMIC drive number
 *	encl_dev	- device entry for the enclosure to fill in
 * Uses BMIC identify-physical-device to determine the box index.
 */
3442static void hpsa_get_enclosure_info(struct ctlr_info *h,
3443 unsigned char *scsi3addr,
3444 struct ReportExtendedLUNdata *rlep, int rle_index,
3445 struct hpsa_scsi_dev_t *encl_dev)
3446{
3447 int rc = -1;
3448 struct CommandList *c = NULL;
3449 struct ErrorInfo *ei = NULL;
3450 struct bmic_sense_storage_box_params *bssbp = NULL;
3451 struct bmic_identify_physical_device *id_phys = NULL;
3452 struct ext_report_lun_entry *rle;
3453 u16 bmic_device_index = 0;
3454
3455 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
3456 return;
3457
3458 rle = &rlep->LUN[rle_index];
3459
3460 encl_dev->eli =
3461 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3462
3463 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3464
3465 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3466 rc = IO_OK;
3467 goto out;
3468 }
3469
3470 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3471 rc = IO_OK;
3472 goto out;
3473 }
3474
3475 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3476 if (!bssbp)
3477 goto out;
3478
3479 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3480 if (!id_phys)
3481 goto out;
3482
3483 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3484 id_phys, sizeof(*id_phys));
3485 if (rc) {
3486 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3487 __func__, encl_dev->external, bmic_device_index);
3488 goto out;
3489 }
3490
3491 c = cmd_alloc(h);
3492
3493 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3494 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3495
3496 if (rc)
3497 goto out;
3498
3499 if (id_phys->phys_connector[1] == 'E')
3500 c->Request.CDB[5] = id_phys->box_index;
3501 else
3502 c->Request.CDB[5] = 0;
3503
3504 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3505 NO_TIMEOUT);
3506 if (rc)
3507 goto out;
3508
3509 ei = c->err_info;
3510 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3511 rc = -1;
3512 goto out;
3513 }
3514
3515 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3516 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3517 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3518
3519 rc = IO_OK;
3520out:
3521 kfree(bssbp);
3522 kfree(id_phys);
3523
3524 if (c)
3525 cmd_free(h, c);
3526
3527 if (rc != IO_OK)
3528 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3529 "Error, could not get enclosure information");
3530}
3531
3532static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3533 unsigned char *scsi3addr)
3534{
3535 struct ReportExtendedLUNdata *physdev;
3536 u32 nphysicals;
3537 u64 sa = 0;
3538 int i;
3539
3540 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3541 if (!physdev)
3542 return 0;
3543
3544 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3545 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3546 kfree(physdev);
3547 return 0;
3548 }
3549 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3550
3551 for (i = 0; i < nphysicals; i++)
3552 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3553 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3554 break;
3555 }
3556
3557 kfree(physdev);
3558
3559 return sa;
3560}
3561
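/*
 * Fill in dev->sas_address.  For the controller LUN, query BMIC
 * sense-subsystem-information; for other physical devices, look the WWID
 * up in the report-physical-LUNs data.
 */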
3562static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3563 struct hpsa_scsi_dev_t *dev)
3564{
3565 int rc;
3566 u64 sa = 0;
3567
3568 if (is_hba_lunid(scsi3addr)) {
3569 struct bmic_sense_subsystem_info *ssi;
3570
3571 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3572 if (!ssi)
3573 return;
3574
3575 rc = hpsa_bmic_sense_subsystem_information(h,
3576 scsi3addr, 0, ssi, sizeof(*ssi));
3577 if (rc == 0) {
3578 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3579 h->sas_address = sa;
3580 }
3581
3582 kfree(ssi);
3583 } else
3584 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3585
3586 dev->sas_address = sa;
3587}
3588
3589static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3590 struct ReportExtendedLUNdata *physdev)
3591{
3592 u32 nphysicals;
3593 int i;
3594
3595 if (h->discovery_polling)
3596 return;
3597
3598 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3599
3600 for (i = 0; i < nphysicals; i++) {
3601 if (physdev->LUN[i].device_type ==
3602 BMIC_DEVICE_TYPE_CONTROLLER
3603 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3604 dev_info(&h->pdev->dev,
3605 "External controller present, activate discovery polling and disable rld caching\n");
3606 hpsa_disable_rld_caching(h);
3607 h->discovery_polling = 1;
3608 break;
3609 }
3610 }
3611}
3612
3613
3614static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3615 unsigned char scsi3addr[], u8 page)
3616{
3617 int rc;
3618 int i;
3619 int pages;
3620 unsigned char *buf, bufsize;
3621
3622 buf = kzalloc(256, GFP_KERNEL);
3623 if (!buf)
3624 return false;
3625
3626
3627 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3628 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3629 buf, HPSA_VPD_HEADER_SZ);
3630 if (rc != 0)
3631 goto exit_unsupported;
3632 pages = buf[3];
3633 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3634 bufsize = pages + HPSA_VPD_HEADER_SZ;
3635 else
3636 bufsize = 255;
3637
3638
3639 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3640 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3641 buf, bufsize);
3642 if (rc != 0)
3643 goto exit_unsupported;
3644
3645 pages = buf[3];
3646 for (i = 1; i <= pages; i++)
3647 if (buf[3 + i] == page)
3648 goto exit_supported;
3649exit_unsupported:
3650 kfree(buf);
3651 return false;
3652exit_supported:
3653 kfree(buf);
3654 return true;
3655}
3656
/*
 * Called during device discovery.  Sets the ioaccel (HP SSD Smart Path)
 * status on the newly discovered device entry, not on the list currently
 * used for I/O; that list is updated later in adjust_hpsa_scsi_table().
 */
3664static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3665 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3666{
3667 int rc;
3668 unsigned char *buf;
3669 u8 ioaccel_status;
3670
3671 this_device->offload_config = 0;
3672 this_device->offload_enabled = 0;
3673 this_device->offload_to_be_enabled = 0;
3674
3675 buf = kzalloc(64, GFP_KERNEL);
3676 if (!buf)
3677 return;
3678 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3679 goto out;
3680 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3681 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3682 if (rc != 0)
3683 goto out;
3684
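/*
 * Byte 4 of the ioaccel status VPD page carries two flags:
 * bit 0 = offload configured, bit 1 = offload currently enabled.
 */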
3685#define IOACCEL_STATUS_BYTE 4
3686#define OFFLOAD_CONFIGURED_BIT 0x01
3687#define OFFLOAD_ENABLED_BIT 0x02
3688 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3689 this_device->offload_config =
3690 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3691 if (this_device->offload_config) {
3692 bool offload_enabled =
3693 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		/*
		 * Offload is only enabled once a current RAID map has been
		 * fetched for the volume.
		 */
3697 if (offload_enabled) {
3698 rc = hpsa_get_raid_map(h, scsi3addr, this_device);
3699 if (rc)
3700 goto out;
3701 this_device->offload_to_be_enabled = 1;
3702 }
3703 }
3704
3705out:
3706 kfree(buf);
3707 return;
3708}
3709
3710
3711static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3712 unsigned char *device_id, int index, int buflen)
3713{
3714 int rc;
3715 unsigned char *buf;
3716
3717
3718 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3719 return 1;
3720
3721 buf = kzalloc(64, GFP_KERNEL);
3722 if (!buf)
3723 return -ENOMEM;
3724
3725 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3726 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3727 if (rc == 0) {
3728 if (buflen > 16)
3729 buflen = 16;
3730 memcpy(device_id, &buf[8], buflen);
3731 }
3732
3733 kfree(buf);
3734
3735 return rc;
3736}
3737
3738static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3739 void *buf, int bufsize,
3740 int extended_response)
3741{
3742 int rc = IO_OK;
3743 struct CommandList *c;
3744 unsigned char scsi3addr[8];
3745 struct ErrorInfo *ei;
3746
3747 c = cmd_alloc(h);
3748
	/* address the controller */
3750 memset(scsi3addr, 0, sizeof(scsi3addr));
3751 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3752 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3753 rc = -EAGAIN;
3754 goto out;
3755 }
3756 if (extended_response)
3757 c->Request.CDB[1] = extended_response;
3758 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3759 NO_TIMEOUT);
3760 if (rc)
3761 goto out;
3762 ei = c->err_info;
3763 if (ei->CommandStatus != 0 &&
3764 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3765 hpsa_scsi_interpret_error(h, c);
3766 rc = -EIO;
3767 } else {
3768 struct ReportLUNdata *rld = buf;
3769
3770 if (rld->extended_response_flag != extended_response) {
3771 if (!h->legacy_board) {
3772 dev_err(&h->pdev->dev,
3773 "report luns requested format %u, got %u\n",
3774 extended_response,
3775 rld->extended_response_flag);
3776 rc = -EINVAL;
3777 } else
3778 rc = -EOPNOTSUPP;
3779 }
3780 }
3781out:
3782 cmd_free(h, c);
3783 return rc;
3784}
3785
3786static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3787 struct ReportExtendedLUNdata *buf, int bufsize)
3788{
3789 int rc;
3790 struct ReportLUNdata *lbuf;
3791
3792 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3793 HPSA_REPORT_PHYS_EXTENDED);
3794 if (!rc || rc != -EOPNOTSUPP)
3795 return rc;
3796
3797
3798 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3799 if (!lbuf)
3800 return -ENOMEM;
3801
3802 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3803 if (!rc) {
3804 int i;
3805 u32 nphys;
3806
3807
3808 memcpy(buf, lbuf, 8);
3809 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3810 for (i = 0; i < nphys; i++)
3811 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3812 }
3813 kfree(lbuf);
3814 return rc;
3815}
3816
3817static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3818 struct ReportLUNdata *buf, int bufsize)
3819{
3820 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3821}
3822
3823static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3824 int bus, int target, int lun)
3825{
3826 device->bus = bus;
3827 device->target = target;
3828 device->lun = lun;
3829}
3830
3831
3832static int hpsa_get_volume_status(struct ctlr_info *h,
3833 unsigned char scsi3addr[])
3834{
3835 int rc;
3836 int status;
3837 int size;
3838 unsigned char *buf;
3839
3840 buf = kzalloc(64, GFP_KERNEL);
3841 if (!buf)
3842 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3843
3844
3845 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3846 goto exit_failed;
3847
3848
3849 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3850 buf, HPSA_VPD_HEADER_SZ);
3851 if (rc != 0)
3852 goto exit_failed;
3853 size = buf[3];
3854
3855
3856 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3857 buf, size + HPSA_VPD_HEADER_SZ);
3858 if (rc != 0)
3859 goto exit_failed;
3860 status = buf[4];
3861
3862 kfree(buf);
3863 return status;
3864exit_failed:
3865 kfree(buf);
3866 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3867}
3868
/*
 * Determine the offline status of a volume.  Returns:
 *	HPSA_LV_OK if the volume is not offline,
 *	HPSA_VPD_LV_STATUS_UNSUPPORTED if the status cannot be determined,
 *	or one of the HPSA_LV_* NOT READY codes describing why the volume
 *	should be kept offline.
 */
3876static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3877 unsigned char scsi3addr[])
3878{
3879 struct CommandList *c;
3880 unsigned char *sense;
3881 u8 sense_key, asc, ascq;
3882 int sense_len;
3883 int rc, ldstat = 0;
3884#define ASC_LUN_NOT_READY 0x04
3885#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3886#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3887
3888 c = cmd_alloc(h);
3889
3890 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3891 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3892 NO_TIMEOUT);
3893 if (rc) {
3894 cmd_free(h, c);
3895 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3896 }
3897 sense = c->err_info->SenseInfo;
3898 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3899 sense_len = sizeof(c->err_info->SenseInfo);
3900 else
3901 sense_len = c->err_info->SenseLen;
3902 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3903 cmd_free(h, c);
3904
3905
3906 ldstat = hpsa_get_volume_status(h, scsi3addr);
3907
3908
3909 switch (ldstat) {
3910 case HPSA_LV_FAILED:
3911 case HPSA_LV_UNDERGOING_ERASE:
3912 case HPSA_LV_NOT_AVAILABLE:
3913 case HPSA_LV_UNDERGOING_RPI:
3914 case HPSA_LV_PENDING_RPI:
3915 case HPSA_LV_ENCRYPTED_NO_KEY:
3916 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3917 case HPSA_LV_UNDERGOING_ENCRYPTION:
3918 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3919 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3920 return ldstat;
3921 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/*
		 * If the VPD status page isn't available, fall back to the
		 * TEST UNIT READY sense data (ASC/ASCQ) to decide.
		 */
3925 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3926 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3927 return ldstat;
3928 break;
3929 default:
3930 break;
3931 }
3932 return HPSA_LV_OK;
3933}
3934
3935static int hpsa_update_device_info(struct ctlr_info *h,
3936 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3937 unsigned char *is_OBDR_device)
3938{
3939
3940#define OBDR_SIG_OFFSET 43
3941#define OBDR_TAPE_SIG "$DR-10"
3942#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3943#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3944
3945 unsigned char *inq_buff;
3946 unsigned char *obdr_sig;
3947 int rc = 0;
3948
3949 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3950 if (!inq_buff) {
3951 rc = -ENOMEM;
3952 goto bail_out;
3953 }
3954
3955
3956 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3957 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3958 dev_err(&h->pdev->dev,
3959 "%s: inquiry failed, device will be skipped.\n",
3960 __func__);
3961 rc = HPSA_INQUIRY_FAILED;
3962 goto bail_out;
3963 }
3964
3965 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3966 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3967
3968 this_device->devtype = (inq_buff[0] & 0x1f);
3969 memcpy(this_device->scsi3addr, scsi3addr, 8);
3970 memcpy(this_device->vendor, &inq_buff[8],
3971 sizeof(this_device->vendor));
3972 memcpy(this_device->model, &inq_buff[16],
3973 sizeof(this_device->model));
3974 this_device->rev = inq_buff[2];
3975 memset(this_device->device_id, 0,
3976 sizeof(this_device->device_id));
3977 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3978 sizeof(this_device->device_id)) < 0) {
3979 dev_err(&h->pdev->dev,
3980 "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
3981 h->ctlr, __func__,
3982 h->scsi_host->host_no,
3983 this_device->bus, this_device->target,
3984 this_device->lun,
3985 scsi_device_type(this_device->devtype),
3986 this_device->model);
3987 rc = HPSA_LV_FAILED;
3988 goto bail_out;
3989 }
3990
3991 if ((this_device->devtype == TYPE_DISK ||
3992 this_device->devtype == TYPE_ZBC) &&
3993 is_logical_dev_addr_mode(scsi3addr)) {
3994 unsigned char volume_offline;
3995
3996 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3997 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3998 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3999 volume_offline = hpsa_volume_offline(h, scsi3addr);
4000 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
4001 h->legacy_board) {
			/*
			 * Older ("legacy") controllers may not support the
			 * volume status VPD page; assume the volume is
			 * online.
			 */
4005 dev_info(&h->pdev->dev,
4006 "C0:T%d:L%d Volume status not available, assuming online.\n",
4007 this_device->target, this_device->lun);
4008 volume_offline = 0;
4009 }
4010 this_device->volume_offline = volume_offline;
4011 if (volume_offline == HPSA_LV_FAILED) {
4012 rc = HPSA_LV_FAILED;
4013 dev_err(&h->pdev->dev,
4014 "%s: LV failed, device will be skipped.\n",
4015 __func__);
4016 goto bail_out;
4017 }
4018 } else {
4019 this_device->raid_level = RAID_UNKNOWN;
4020 this_device->offload_config = 0;
4021 hpsa_turn_off_ioaccel_for_device(this_device);
4022 this_device->hba_ioaccel_enabled = 0;
4023 this_device->volume_offline = 0;
4024 this_device->queue_depth = h->nr_cmds;
4025 }
4026
4027 if (this_device->external)
4028 this_device->queue_depth = EXTERNAL_QD;
4029
4030 if (is_OBDR_device) {
		/*
		 * See if this is a One-Button Disaster Recovery device by
		 * looking for "$DR-10" at offset 43 in the inquiry data.
		 */
4034 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4035 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4036 strncmp(obdr_sig, OBDR_TAPE_SIG,
4037 OBDR_SIG_LEN) == 0);
4038 }
4039 kfree(inq_buff);
4040 return 0;
4041
4042bail_out:
4043 kfree(inq_buff);
4044 return rc;
4045}
4046
/*
 * Helper to assign the bus/target/lun mapping of devices.  Logical drive
 * targets and luns are assigned here; physical device target/lun
 * assignment is deferred until the device is added to the SCSI host.
 */
4053static void figure_bus_target_lun(struct ctlr_info *h,
4054 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4055{
4056 u32 lunid = get_unaligned_le32(lunaddrbytes);
4057
4058 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4059
4060 if (is_hba_lunid(lunaddrbytes)) {
4061 int bus = HPSA_HBA_BUS;
4062
4063 if (!device->rev)
4064 bus = HPSA_LEGACY_HBA_BUS;
4065 hpsa_set_bus_target_lun(device,
4066 bus, 0, lunid & 0x3fff);
4067 } else
4068
4069 hpsa_set_bus_target_lun(device,
4070 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4071 return;
4072 }
4073
4074 if (device->external) {
4075 hpsa_set_bus_target_lun(device,
4076 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4077 lunid & 0x00ff);
4078 return;
4079 }
4080 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4081 0, lunid & 0x3fff);
4082}
4083
4084static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4085 int i, int nphysicals, int nlocal_logicals)
4086{
	/*
	 * In the report-logicals data, local logical drives are listed
	 * first, followed by any external logical drives.
	 */
4090 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4091
4092 if (i == raid_ctlr_position)
4093 return 0;
4094
4095 if (i < logicals_start)
4096 return 0;
4097
4098
4099 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4100 return 0;
4101
4102 return 1;
4103}
4104
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev
 * and logdev; the LUN counts are returned in *nphysicals and *nlogicals.
 * Returns 0 on success, -1 otherwise.
 */
4111static int hpsa_gather_lun_info(struct ctlr_info *h,
4112 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4113 struct ReportLUNdata *logdev, u32 *nlogicals)
4114{
4115 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4116 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4117 return -1;
4118 }
4119 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4120 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4121 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4122 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4123 *nphysicals = HPSA_MAX_PHYS_LUN;
4124 }
4125 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4126 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4127 return -1;
4128 }
4129 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4130
4131 if (*nlogicals > HPSA_MAX_LUN) {
4132 dev_warn(&h->pdev->dev,
4133 "maximum logical LUNs (%d) exceeded. "
4134 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4135 *nlogicals - HPSA_MAX_LUN);
4136 *nlogicals = HPSA_MAX_LUN;
4137 }
4138 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4139 dev_warn(&h->pdev->dev,
4140 "maximum logical + physical LUNs (%d) exceeded. "
4141 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4142 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4143 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4144 }
4145 return 0;
4146}
4147
4148static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4149 int i, int nphysicals, int nlogicals,
4150 struct ReportExtendedLUNdata *physdev_list,
4151 struct ReportLUNdata *logdev_list)
4152{
	/*
	 * Figure out where the LUN ID info for index i comes from, given
	 * the physical and logical device lists and where in the combined
	 * list the RAID controller itself appears (first or last).
	 */
4158 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4159 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4160
4161 if (i == raid_ctlr_position)
4162 return RAID_CTLR_LUNID;
4163
4164 if (i < logicals_start)
4165 return &physdev_list->LUN[i -
4166 (raid_ctlr_position == 0)].lunid[0];
4167
4168 if (i < last_device)
4169 return &logdev_list->LUN[i - nphysicals -
4170 (raid_ctlr_position == 0)][0];
4171 BUG();
4172 return NULL;
4173}
4174
4175
4176static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4177 struct hpsa_scsi_dev_t *dev,
4178 struct ReportExtendedLUNdata *rlep, int rle_index,
4179 struct bmic_identify_physical_device *id_phys)
4180{
4181 int rc;
4182 struct ext_report_lun_entry *rle;
4183
4184 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4185 return;
4186
4187 rle = &rlep->LUN[rle_index];
4188
4189 dev->ioaccel_handle = rle->ioaccel_handle;
4190 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4191 dev->hba_ioaccel_enabled = 1;
4192 memset(id_phys, 0, sizeof(*id_phys));
4193 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4194 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4195 sizeof(*id_phys));
4196 if (!rc)
4197
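/* Reserve a couple of commands for firmware use when sizing the drive queue depth. */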
4198#define DRIVE_CMDS_RESERVED_FOR_FW 2
4199#define DRIVE_QUEUE_DEPTH 7
4200 dev->queue_depth =
4201 le16_to_cpu(id_phys->current_queue_depth_limit) -
4202 DRIVE_CMDS_RESERVED_FOR_FW;
4203 else
4204 dev->queue_depth = DRIVE_QUEUE_DEPTH;
4205}
4206
4207static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4208 struct ReportExtendedLUNdata *rlep, int rle_index,
4209 struct bmic_identify_physical_device *id_phys)
4210{
4211 struct ext_report_lun_entry *rle;
4212
4213 if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
4214 return;
4215
4216 rle = &rlep->LUN[rle_index];
4217
4218 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4219 this_device->hba_ioaccel_enabled = 1;
4220
4221 memcpy(&this_device->active_path_index,
4222 &id_phys->active_path_number,
4223 sizeof(this_device->active_path_index));
4224 memcpy(&this_device->path_map,
4225 &id_phys->redundant_path_present_map,
4226 sizeof(this_device->path_map));
4227 memcpy(&this_device->box,
4228 &id_phys->alternate_paths_phys_box_on_port,
4229 sizeof(this_device->box));
4230 memcpy(&this_device->phys_connector,
4231 &id_phys->alternate_paths_phys_connector,
4232 sizeof(this_device->phys_connector));
4233 memcpy(&this_device->bay,
4234 &id_phys->phys_bay_in_box,
4235 sizeof(this_device->bay));
4236}
4237
4238
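/* Get the number of logical volumes that are local to this controller. */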
4239static int hpsa_set_local_logical_count(struct ctlr_info *h,
4240 struct bmic_identify_controller *id_ctlr,
4241 u32 *nlocals)
4242{
4243 int rc;
4244
4245 if (!id_ctlr) {
4246 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4247 __func__);
4248 return -ENOMEM;
4249 }
4250 memset(id_ctlr, 0, sizeof(*id_ctlr));
4251 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4252 if (!rc)
4253 if (id_ctlr->configured_logical_drive_count < 255)
4254 *nlocals = id_ctlr->configured_logical_drive_count;
4255 else
4256 *nlocals = le16_to_cpu(
4257 id_ctlr->extended_logical_unit_count);
4258 else
4259 *nlocals = -1;
4260 return rc;
4261}
4262
4263static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4264{
4265 struct bmic_identify_physical_device *id_phys;
4266 bool is_spare = false;
4267 int rc;
4268
4269 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4270 if (!id_phys)
4271 return false;
4272
4273 rc = hpsa_bmic_id_physical_device(h,
4274 lunaddrbytes,
4275 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4276 id_phys, sizeof(*id_phys));
4277 if (rc == 0)
4278 is_spare = (id_phys->more_flags >> 6) & 0x01;
4279
4280 kfree(id_phys);
4281 return is_spare;
4282}
4283
4284#define RPL_DEV_FLAG_NON_DISK 0x1
4285#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4286#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4287
4288#define BMIC_DEVICE_TYPE_ENCLOSURE 6
4289
4290static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4291 struct ext_report_lun_entry *rle)
4292{
4293 u8 device_flags;
4294 u8 device_type;
4295
4296 if (!MASKED_DEVICE(lunaddrbytes))
4297 return false;
4298
4299 device_flags = rle->device_flags;
4300 device_type = rle->device_type;
4301
4302 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4303 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4304 return false;
4305 return true;
4306 }
4307
4308 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4309 return false;
4310
4311 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4312 return false;
4313
/*
 * Spares may be spun down, and we do not want to send an Inquiry to a
 * RAID set spare drive: that would spin it up, and I/O to the RAID
 * device stalls while the spin-up occurs, which can take many seconds.
 */
4322 if (hpsa_is_disk_spare(h, lunaddrbytes))
4323 return true;
4324
4325 return false;
4326}
4327
4328static void hpsa_update_scsi_devices(struct ctlr_info *h)
4329{
/*
 * The idea here is that we could get notified that some devices have
 * changed, so we do report physical LUNs and report logical LUNs commands
 * and adjust our list of devices accordingly.
 *
 * The scsi3addr of a device will not change so long as the adapter is not
 * reset, so we can rescan and tell which devices we already know about
 * vs. new devices vs. devices that have disappeared.
 */
4340 struct ReportExtendedLUNdata *physdev_list = NULL;
4341 struct ReportLUNdata *logdev_list = NULL;
4342 struct bmic_identify_physical_device *id_phys = NULL;
4343 struct bmic_identify_controller *id_ctlr = NULL;
4344 u32 nphysicals = 0;
4345 u32 nlogicals = 0;
4346 u32 nlocal_logicals = 0;
4347 u32 ndev_allocated = 0;
4348 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4349 int ncurrent = 0;
4350 int i, ndevs_to_allocate;
4351 int raid_ctlr_position;
4352 bool physical_device;
4353 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4354
4355 currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
4356 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4357 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4358 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4359 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4360 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4361
4362 if (!currentsd || !physdev_list || !logdev_list ||
4363 !tmpdevice || !id_phys || !id_ctlr) {
4364 dev_err(&h->pdev->dev, "out of memory\n");
4365 goto out;
4366 }
4367 memset(lunzerobits, 0, sizeof(lunzerobits));
4368
4369 h->drv_req_rescan = 0;
4370
4371 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4372 logdev_list, &nlogicals)) {
4373 h->drv_req_rescan = 1;
4374 goto out;
4375 }
4376
4377
4378 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4379 dev_warn(&h->pdev->dev,
4380 "%s: Can't determine number of local logical devices.\n",
4381 __func__);
4382 }
4383
/*
 * We might see up to the maximum number of logical and physical disks,
 * plus external target devices, and a device for the local RAID
 * controller.
 */
4388 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4389
4390 hpsa_ext_ctrl_present(h, physdev_list);
4391
4392
4393 for (i = 0; i < ndevs_to_allocate; i++) {
4394 if (i >= HPSA_MAX_DEVICES) {
4395 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4396 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4397 ndevs_to_allocate - HPSA_MAX_DEVICES);
4398 break;
4399 }
4400
4401 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4402 if (!currentsd[i]) {
4403 h->drv_req_rescan = 1;
4404 goto out;
4405 }
4406 ndev_allocated++;
4407 }
4408
4409 if (is_scsi_rev_5(h))
4410 raid_ctlr_position = 0;
4411 else
4412 raid_ctlr_position = nphysicals + nlogicals;
4413
4414
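/* Walk the reported devices (plus the controller itself) and classify each. */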
4415 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4416 u8 *lunaddrbytes, is_OBDR = 0;
4417 int rc = 0;
4418 int phys_dev_index = i - (raid_ctlr_position == 0);
4419 bool skip_device = false;
4420
4421 memset(tmpdevice, 0, sizeof(*tmpdevice));
4422
4423 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4424
4425
4426 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4427 i, nphysicals, nlogicals, physdev_list, logdev_list);
4428
4429
4430 tmpdevice->external =
4431 figure_external_status(h, raid_ctlr_position, i,
4432 nphysicals, nlocal_logicals);
4433
4434
4435
4436
4437 if (phys_dev_index >= 0 && !tmpdevice->external &&
4438 physical_device) {
4439 skip_device = hpsa_skip_device(h, lunaddrbytes,
4440 &physdev_list->LUN[phys_dev_index]);
4441 if (skip_device)
4442 continue;
4443 }
4444
4445
4446 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4447 &is_OBDR);
4448 if (rc == -ENOMEM) {
4449 dev_warn(&h->pdev->dev,
4450 "Out of memory, rescan deferred.\n");
4451 h->drv_req_rescan = 1;
4452 goto out;
4453 }
4454 if (rc) {
4455 h->drv_req_rescan = 1;
4456 continue;
4457 }
4458
4459 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4460 this_device = currentsd[ncurrent];
4461
4462 *this_device = *tmpdevice;
4463 this_device->physical_device = physical_device;
4464
4465
4466
4467
4468
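/* Expose all devices except for physical devices that are masked. */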
4469 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4470 this_device->expose_device = 0;
4471 else
4472 this_device->expose_device = 1;
4473
4474
4475
4476
4477
4478 if (this_device->physical_device && this_device->expose_device)
4479 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4480
4481 switch (this_device->devtype) {
4482 case TYPE_ROM:
/*
 * We don't *really* support actual CD-ROM devices, just "One Button
 * Disaster Recovery" tape drives that temporarily pretend to be CD-ROMs.
 * Only expose the device if it was identified as an OBDR device.
 */
4490 if (is_OBDR)
4491 ncurrent++;
4492 break;
4493 case TYPE_DISK:
4494 case TYPE_ZBC:
4495 if (this_device->physical_device) {
4496
4497
4498 this_device->offload_enabled = 0;
4499 hpsa_get_ioaccel_drive_info(h, this_device,
4500 physdev_list, phys_dev_index, id_phys);
4501 hpsa_get_path_info(this_device,
4502 physdev_list, phys_dev_index, id_phys);
4503 }
4504 ncurrent++;
4505 break;
4506 case TYPE_TAPE:
4507 case TYPE_MEDIUM_CHANGER:
4508 ncurrent++;
4509 break;
4510 case TYPE_ENCLOSURE:
4511 if (!this_device->external)
4512 hpsa_get_enclosure_info(h, lunaddrbytes,
4513 physdev_list, phys_dev_index,
4514 this_device);
4515 ncurrent++;
4516 break;
4517 case TYPE_RAID:
/*
 * Only present the controller itself as a RAID-class device.  If it is
 * a RAID controller other than this HBA (an external RAID controller,
 * for example), do not present it.
 */
4523 if (!is_hba_lunid(lunaddrbytes))
4524 break;
4525 ncurrent++;
4526 break;
4527 default:
4528 break;
4529 }
4530 if (ncurrent >= HPSA_MAX_DEVICES)
4531 break;
4532 }
4533
4534 if (h->sas_host == NULL) {
4535 int rc = 0;
4536
4537 rc = hpsa_add_sas_host(h);
4538 if (rc) {
4539 dev_warn(&h->pdev->dev,
4540 "Could not add sas host %d\n", rc);
4541 goto out;
4542 }
4543 }
4544
4545 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4546out:
4547 kfree(tmpdevice);
4548 for (i = 0; i < ndev_allocated; i++)
4549 kfree(currentsd[i]);
4550 kfree(currentsd);
4551 kfree(physdev_list);
4552 kfree(logdev_list);
4553 kfree(id_ctlr);
4554 kfree(id_phys);
4555}
4556
4557static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4558 struct scatterlist *sg)
4559{
4560 u64 addr64 = (u64) sg_dma_address(sg);
4561 unsigned int len = sg_dma_len(sg);
4562
4563 desc->Addr = cpu_to_le64(addr64);
4564 desc->Len = cpu_to_le32(len);
4565 desc->Ext = 0;
4566}
4567
4568
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the DMA
 * mapping and fills in the scatter gather entries of the hpsa command, cp.
 */
4573static int hpsa_scatter_gather(struct ctlr_info *h,
4574 struct CommandList *cp,
4575 struct scsi_cmnd *cmd)
4576{
4577 struct scatterlist *sg;
4578 int use_sg, i, sg_limit, chained;
4579 struct SGDescriptor *curr_sg;
4580
4581 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4582
4583 use_sg = scsi_dma_map(cmd);
4584 if (use_sg < 0)
4585 return use_sg;
4586
4587 if (!use_sg)
4588 goto sglist_finished;
4589
/*
 * If the number of entries is greater than the max for a single list,
 * then we have a chained list; we will set up all but one entry in the
 * first list (the last entry is saved for link information); otherwise,
 * we don't have a chained list and we'll set up each entry in the list.
 */
4597 curr_sg = cp->SG;
4598 chained = use_sg > h->max_cmd_sg_entries;
4599 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4600 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4601 hpsa_set_sg_descriptor(curr_sg, sg);
4602 curr_sg++;
4603 }
4604
4605 if (chained) {
/*
 * Continue with the chained list.  Set curr_sg to the chained list block,
 * limit the count to the entries not yet set up, and resume the scan
 * where the previous loop left off.
 */
4612 curr_sg = h->cmd_sg_list[cp->cmdindex];
4613 sg_limit = use_sg - sg_limit;
4614 for_each_sg(sg, sg, sg_limit, i) {
4615 hpsa_set_sg_descriptor(curr_sg, sg);
4616 curr_sg++;
4617 }
4618 }
4619
4620
4621 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4622
4623 if (use_sg + chained > h->maxSG)
4624 h->maxSG = use_sg + chained;
4625
4626 if (chained) {
4627 cp->Header.SGList = h->max_cmd_sg_entries;
4628 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4629 if (hpsa_map_sg_chain_block(h, cp)) {
4630 scsi_dma_unmap(cmd);
4631 return -1;
4632 }
4633 return 0;
4634 }
4635
4636sglist_finished:
4637
4638 cp->Header.SGList = (u8) use_sg;
4639 cp->Header.SGTotal = cpu_to_le16(use_sg);
4640 return 0;
4641}
4642
4643static inline void warn_zero_length_transfer(struct ctlr_info *h,
4644 u8 *cdb, int cdb_len,
4645 const char *func)
4646{
4647 dev_warn(&h->pdev->dev,
4648 "%s: Blocking zero-length request: CDB:%*phN\n",
4649 func, cdb_len, cdb);
4650}
4651
4652#define IO_ACCEL_INELIGIBLE 1
4653
4654static bool is_zero_length_transfer(u8 *cdb)
4655{
4656 u32 block_cnt;
4657
4658
4659 switch (cdb[0]) {
4660 case READ_10:
4661 case WRITE_10:
4662 case VERIFY:
4663 case WRITE_VERIFY:
4664 block_cnt = get_unaligned_be16(&cdb[7]);
4665 break;
4666 case READ_12:
4667 case WRITE_12:
4668 case VERIFY_12:
4669 case WRITE_VERIFY_12:
4670 block_cnt = get_unaligned_be32(&cdb[6]);
4671 break;
4672 case READ_16:
4673 case WRITE_16:
4674 case VERIFY_16:
4675 block_cnt = get_unaligned_be32(&cdb[10]);
4676 break;
4677 default:
4678 return false;
4679 }
4680
4681 return block_cnt == 0;
4682}
4683
4684static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4685{
4686 int is_write = 0;
4687 u32 block;
4688 u32 block_cnt;
4689
4690
4691 switch (cdb[0]) {
4692 case WRITE_6:
4693 case WRITE_12:
4694 is_write = 1;
4695 fallthrough;
4696 case READ_6:
4697 case READ_12:
4698 if (*cdb_len == 6) {
4699 block = (((cdb[1] & 0x1F) << 16) |
4700 (cdb[2] << 8) |
4701 cdb[3]);
4702 block_cnt = cdb[4];
4703 if (block_cnt == 0)
4704 block_cnt = 256;
4705 } else {
4706 BUG_ON(*cdb_len != 12);
4707 block = get_unaligned_be32(&cdb[2]);
4708 block_cnt = get_unaligned_be32(&cdb[6]);
4709 }
4710 if (block_cnt > 0xffff)
4711 return IO_ACCEL_INELIGIBLE;
4712
4713 cdb[0] = is_write ? WRITE_10 : READ_10;
4714 cdb[1] = 0;
4715 cdb[2] = (u8) (block >> 24);
4716 cdb[3] = (u8) (block >> 16);
4717 cdb[4] = (u8) (block >> 8);
4718 cdb[5] = (u8) (block);
4719 cdb[6] = 0;
4720 cdb[7] = (u8) (block_cnt >> 8);
4721 cdb[8] = (u8) (block_cnt);
4722 cdb[9] = 0;
4723 *cdb_len = 10;
4724 break;
4725 }
4726 return 0;
4727}
4728
4729static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4730 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4731 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4732{
4733 struct scsi_cmnd *cmd = c->scsi_cmd;
4734 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4735 unsigned int len;
4736 unsigned int total_len = 0;
4737 struct scatterlist *sg;
4738 u64 addr64;
4739 int use_sg, i;
4740 struct SGDescriptor *curr_sg;
4741 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4742
4743
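/* ioaccel1 does not support SG chaining, so bail out if the list is too long. */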
4744 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4745 atomic_dec(&phys_disk->ioaccel_cmds_out);
4746 return IO_ACCEL_INELIGIBLE;
4747 }
4748
4749 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4750
4751 if (is_zero_length_transfer(cdb)) {
4752 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4753 atomic_dec(&phys_disk->ioaccel_cmds_out);
4754 return IO_ACCEL_INELIGIBLE;
4755 }
4756
4757 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4758 atomic_dec(&phys_disk->ioaccel_cmds_out);
4759 return IO_ACCEL_INELIGIBLE;
4760 }
4761
4762 c->cmd_type = CMD_IOACCEL1;
4763
4764
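/* Adjust the DMA address to point at the ioaccel1 command buffer for this index. */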
4765 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4766 (c->cmdindex * sizeof(*cp));
4767 BUG_ON(c->busaddr & 0x0000007F);
4768
4769 use_sg = scsi_dma_map(cmd);
4770 if (use_sg < 0) {
4771 atomic_dec(&phys_disk->ioaccel_cmds_out);
4772 return use_sg;
4773 }
4774
4775 if (use_sg) {
4776 curr_sg = cp->SG;
4777 scsi_for_each_sg(cmd, sg, use_sg, i) {
4778 addr64 = (u64) sg_dma_address(sg);
4779 len = sg_dma_len(sg);
4780 total_len += len;
4781 curr_sg->Addr = cpu_to_le64(addr64);
4782 curr_sg->Len = cpu_to_le32(len);
4783 curr_sg->Ext = cpu_to_le32(0);
4784 curr_sg++;
4785 }
4786 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4787
4788 switch (cmd->sc_data_direction) {
4789 case DMA_TO_DEVICE:
4790 control |= IOACCEL1_CONTROL_DATA_OUT;
4791 break;
4792 case DMA_FROM_DEVICE:
4793 control |= IOACCEL1_CONTROL_DATA_IN;
4794 break;
4795 case DMA_NONE:
4796 control |= IOACCEL1_CONTROL_NODATAXFER;
4797 break;
4798 default:
4799 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4800 cmd->sc_data_direction);
4801 BUG();
4802 break;
4803 }
4804 } else {
4805 control |= IOACCEL1_CONTROL_NODATAXFER;
4806 }
4807
4808 c->Header.SGList = use_sg;
4809
4810 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4811 cp->transfer_len = cpu_to_le32(total_len);
4812 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4813 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4814 cp->control = cpu_to_le32(control);
4815 memcpy(cp->CDB, cdb, cdb_len);
4816 memcpy(cp->CISS_LUN, scsi3addr, 8);
4817
4818 enqueue_cmd_and_start_io(h, c);
4819 return 0;
4820}
4821
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
4826static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4827 struct CommandList *c)
4828{
4829 struct scsi_cmnd *cmd = c->scsi_cmd;
4830 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4831
4832 if (!dev)
4833 return -1;
4834
4835 c->phys_disk = dev;
4836
4837 if (dev->in_reset)
4838 return -1;
4839
4840 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4841 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4842}
4843
/*
 * Set encryption parameters for the ioaccel2 request.
 */
4847static void set_encrypt_ioaccel2(struct ctlr_info *h,
4848 struct CommandList *c, struct io_accel2_cmd *cp)
4849{
4850 struct scsi_cmnd *cmd = c->scsi_cmd;
4851 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4852 struct raid_map_data *map = &dev->raid_map;
4853 u64 first_block;
4854
4855
4856 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4857 return;
4858
4859 cp->dekindex = map->dekindex;
4860
4861
4862 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4863
/*
 * Set the encryption tweak values based on the logical block address.
 * If the block size is 512, the tweak value is the LBA; for other block
 * sizes the tweak is (LBA * block size) / 512.
 */
4868 switch (cmd->cmnd[0]) {
4869
4870 case READ_6:
4871 case WRITE_6:
4872 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4873 (cmd->cmnd[2] << 8) |
4874 cmd->cmnd[3]);
4875 break;
4876 case WRITE_10:
4877 case READ_10:
4878
4879 case WRITE_12:
4880 case READ_12:
4881 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4882 break;
4883 case WRITE_16:
4884 case READ_16:
4885 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4886 break;
4887 default:
4888 dev_err(&h->pdev->dev,
4889 "ERROR: %s: size (0x%x) not supported for encryption\n",
4890 __func__, cmd->cmnd[0]);
4891 BUG();
4892 break;
4893 }
4894
4895 if (le32_to_cpu(map->volume_blk_size) != 512)
4896 first_block = first_block *
4897 le32_to_cpu(map->volume_blk_size)/512;
4898
4899 cp->tweak_lower = cpu_to_le32(first_block);
4900 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4901}
4902
4903static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4904 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4905 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4906{
4907 struct scsi_cmnd *cmd = c->scsi_cmd;
4908 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4909 struct ioaccel2_sg_element *curr_sg;
4910 int use_sg, i;
4911 struct scatterlist *sg;
4912 u64 addr64;
4913 u32 len;
4914 u32 total_len = 0;
4915
4916 if (!cmd->device)
4917 return -1;
4918
4919 if (!cmd->device->hostdata)
4920 return -1;
4921
4922 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4923
4924 if (is_zero_length_transfer(cdb)) {
4925 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4926 atomic_dec(&phys_disk->ioaccel_cmds_out);
4927 return IO_ACCEL_INELIGIBLE;
4928 }
4929
4930 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4931 atomic_dec(&phys_disk->ioaccel_cmds_out);
4932 return IO_ACCEL_INELIGIBLE;
4933 }
4934
4935 c->cmd_type = CMD_IOACCEL2;
4936
4937 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4938 (c->cmdindex * sizeof(*cp));
4939 BUG_ON(c->busaddr & 0x0000007F);
4940
4941 memset(cp, 0, sizeof(*cp));
4942 cp->IU_type = IOACCEL2_IU_TYPE;
4943
4944 use_sg = scsi_dma_map(cmd);
4945 if (use_sg < 0) {
4946 atomic_dec(&phys_disk->ioaccel_cmds_out);
4947 return use_sg;
4948 }
4949
4950 if (use_sg) {
4951 curr_sg = cp->sg;
4952 if (use_sg > h->ioaccel_maxsg) {
4953 addr64 = le64_to_cpu(
4954 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4955 curr_sg->address = cpu_to_le64(addr64);
4956 curr_sg->length = 0;
4957 curr_sg->reserved[0] = 0;
4958 curr_sg->reserved[1] = 0;
4959 curr_sg->reserved[2] = 0;
4960 curr_sg->chain_indicator = IOACCEL2_CHAIN;
4961
4962 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4963 }
4964 scsi_for_each_sg(cmd, sg, use_sg, i) {
4965 addr64 = (u64) sg_dma_address(sg);
4966 len = sg_dma_len(sg);
4967 total_len += len;
4968 curr_sg->address = cpu_to_le64(addr64);
4969 curr_sg->length = cpu_to_le32(len);
4970 curr_sg->reserved[0] = 0;
4971 curr_sg->reserved[1] = 0;
4972 curr_sg->reserved[2] = 0;
4973 curr_sg->chain_indicator = 0;
4974 curr_sg++;
4975 }
4976
/*
 * Mark the last scatter-gather element.
 */
4980 (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4981
4982 switch (cmd->sc_data_direction) {
4983 case DMA_TO_DEVICE:
4984 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4985 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4986 break;
4987 case DMA_FROM_DEVICE:
4988 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4989 cp->direction |= IOACCEL2_DIR_DATA_IN;
4990 break;
4991 case DMA_NONE:
4992 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4993 cp->direction |= IOACCEL2_DIR_NO_DATA;
4994 break;
4995 default:
4996 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4997 cmd->sc_data_direction);
4998 BUG();
4999 break;
5000 }
5001 } else {
5002 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
5003 cp->direction |= IOACCEL2_DIR_NO_DATA;
5004 }
5005
5006
5007 set_encrypt_ioaccel2(h, c, cp);
5008
5009 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
5010 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5011 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
5012
5013 cp->data_len = cpu_to_le32(total_len);
5014 cp->err_ptr = cpu_to_le64(c->busaddr +
5015 offsetof(struct io_accel2_cmd, error_data));
5016 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
5017
5018
5019 if (use_sg > h->ioaccel_maxsg) {
5020 cp->sg_count = 1;
5021 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
5022 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
5023 atomic_dec(&phys_disk->ioaccel_cmds_out);
5024 scsi_dma_unmap(cmd);
5025 return -1;
5026 }
5027 } else
5028 cp->sg_count = (u8) use_sg;
5029
5030 if (phys_disk->in_reset) {
5031 cmd->result = DID_RESET << 16;
5032 return -1;
5033 }
5034
5035 enqueue_cmd_and_start_io(h, c);
5036 return 0;
5037}
5038
/*
 * Queue a command to the correct I/O accelerator path (ioaccel1 or ioaccel2).
 */
5042static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
5043 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
5044 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5045{
5046 if (!c->scsi_cmd->device)
5047 return -1;
5048
5049 if (!c->scsi_cmd->device->hostdata)
5050 return -1;
5051
5052 if (phys_disk->in_reset)
5053 return -1;
5054
5055
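/* Try to honor the device's queue depth. */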
5056 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5057 phys_disk->queue_depth) {
5058 atomic_dec(&phys_disk->ioaccel_cmds_out);
5059 return IO_ACCEL_INELIGIBLE;
5060 }
5061 if (h->transMethod & CFGTBL_Trans_io_accel1)
5062 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5063 cdb, cdb_len, scsi3addr,
5064 phys_disk);
5065 else
5066 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5067 cdb, cdb_len, scsi3addr,
5068 phys_disk);
5069}
5070
5071static void raid_map_helper(struct raid_map_data *map,
5072 int offload_to_mirror, u32 *map_index, u32 *current_group)
5073{
5074 if (offload_to_mirror == 0) {
5075
5076 *map_index %= le16_to_cpu(map->data_disks_per_row);
5077 return;
5078 }
5079 do {
5080
5081 *current_group = *map_index /
5082 le16_to_cpu(map->data_disks_per_row);
5083 if (offload_to_mirror == *current_group)
5084 continue;
5085 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5086
5087 *map_index += le16_to_cpu(map->data_disks_per_row);
5088 (*current_group)++;
5089 } else {
5090
5091 *map_index %= le16_to_cpu(map->data_disks_per_row);
5092 *current_group = 0;
5093 }
5094 } while (offload_to_mirror != *current_group);
5095}
5096
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
5100static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5101 struct CommandList *c)
5102{
5103 struct scsi_cmnd *cmd = c->scsi_cmd;
5104 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5105 struct raid_map_data *map = &dev->raid_map;
5106 struct raid_map_disk_data *dd = &map->data[0];
5107 int is_write = 0;
5108 u32 map_index;
5109 u64 first_block, last_block;
5110 u32 block_cnt;
5111 u32 blocks_per_row;
5112 u64 first_row, last_row;
5113 u32 first_row_offset, last_row_offset;
5114 u32 first_column, last_column;
5115 u64 r0_first_row, r0_last_row;
5116 u32 r5or6_blocks_per_row;
5117 u64 r5or6_first_row, r5or6_last_row;
5118 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5119 u32 r5or6_first_column, r5or6_last_column;
5120 u32 total_disks_per_row;
5121 u32 stripesize;
5122 u32 first_group, last_group, current_group;
5123 u32 map_row;
5124 u32 disk_handle;
5125 u64 disk_block;
5126 u32 disk_block_cnt;
5127 u8 cdb[16];
5128 u8 cdb_len;
5129 u16 strip_size;
5130#if BITS_PER_LONG == 32
5131 u64 tmpdiv;
5132#endif
5133 int offload_to_mirror;
5134
5135 if (!dev)
5136 return -1;
5137
5138 if (dev->in_reset)
5139 return -1;
5140
5141
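/* Extract the starting LBA and block count from the CDB. */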
5142 switch (cmd->cmnd[0]) {
5143 case WRITE_6:
5144 is_write = 1;
5145 fallthrough;
5146 case READ_6:
5147 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5148 (cmd->cmnd[2] << 8) |
5149 cmd->cmnd[3]);
5150 block_cnt = cmd->cmnd[4];
5151 if (block_cnt == 0)
5152 block_cnt = 256;
5153 break;
5154 case WRITE_10:
5155 is_write = 1;
5156 fallthrough;
5157 case READ_10:
5158 first_block =
5159 (((u64) cmd->cmnd[2]) << 24) |
5160 (((u64) cmd->cmnd[3]) << 16) |
5161 (((u64) cmd->cmnd[4]) << 8) |
5162 cmd->cmnd[5];
5163 block_cnt =
5164 (((u32) cmd->cmnd[7]) << 8) |
5165 cmd->cmnd[8];
5166 break;
5167 case WRITE_12:
5168 is_write = 1;
5169 fallthrough;
5170 case READ_12:
5171 first_block =
5172 (((u64) cmd->cmnd[2]) << 24) |
5173 (((u64) cmd->cmnd[3]) << 16) |
5174 (((u64) cmd->cmnd[4]) << 8) |
5175 cmd->cmnd[5];
5176 block_cnt =
5177 (((u32) cmd->cmnd[6]) << 24) |
5178 (((u32) cmd->cmnd[7]) << 16) |
5179 (((u32) cmd->cmnd[8]) << 8) |
5180 cmd->cmnd[9];
5181 break;
5182 case WRITE_16:
5183 is_write = 1;
5184 fallthrough;
5185 case READ_16:
5186 first_block =
5187 (((u64) cmd->cmnd[2]) << 56) |
5188 (((u64) cmd->cmnd[3]) << 48) |
5189 (((u64) cmd->cmnd[4]) << 40) |
5190 (((u64) cmd->cmnd[5]) << 32) |
5191 (((u64) cmd->cmnd[6]) << 24) |
5192 (((u64) cmd->cmnd[7]) << 16) |
5193 (((u64) cmd->cmnd[8]) << 8) |
5194 cmd->cmnd[9];
5195 block_cnt =
5196 (((u32) cmd->cmnd[10]) << 24) |
5197 (((u32) cmd->cmnd[11]) << 16) |
5198 (((u32) cmd->cmnd[12]) << 8) |
5199 cmd->cmnd[13];
5200 break;
5201 default:
5202 return IO_ACCEL_INELIGIBLE;
5203 }
5204 last_block = first_block + block_cnt - 1;
5205
5206
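/* Writes are only eligible for offload on RAID 0 volumes. */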
5207 if (is_write && dev->raid_level != 0)
5208 return IO_ACCEL_INELIGIBLE;
5209
5210
5211 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5212 last_block < first_block)
5213 return IO_ACCEL_INELIGIBLE;
5214
5215
5216 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5217 le16_to_cpu(map->strip_size);
5218 strip_size = le16_to_cpu(map->strip_size);
5219#if BITS_PER_LONG == 32
5220 tmpdiv = first_block;
5221 (void) do_div(tmpdiv, blocks_per_row);
5222 first_row = tmpdiv;
5223 tmpdiv = last_block;
5224 (void) do_div(tmpdiv, blocks_per_row);
5225 last_row = tmpdiv;
5226 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5227 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5228 tmpdiv = first_row_offset;
5229 (void) do_div(tmpdiv, strip_size);
5230 first_column = tmpdiv;
5231 tmpdiv = last_row_offset;
5232 (void) do_div(tmpdiv, strip_size);
5233 last_column = tmpdiv;
5234#else
5235 first_row = first_block / blocks_per_row;
5236 last_row = last_block / blocks_per_row;
5237 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5238 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5239 first_column = first_row_offset / strip_size;
5240 last_column = last_row_offset / strip_size;
5241#endif
5242
5243
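/* The request must fit within a single row and strip to be eligible. */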
5244 if ((first_row != last_row) || (first_column != last_column))
5245 return IO_ACCEL_INELIGIBLE;
5246
5247
5248 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5249 le16_to_cpu(map->metadata_disks_per_row);
5250 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5251 le16_to_cpu(map->row_cnt);
5252 map_index = (map_row * total_disks_per_row) + first_column;
5253
5254 switch (dev->raid_level) {
5255 case HPSA_RAID_0:
5256 break;
5257 case HPSA_RAID_1:
/*
 * Handle load balancing across RAID 1 mirror members by alternating
 * between the two mirror groups.  A layout map count other than 2 is
 * unexpected, so turn off ioaccel for the device and fall back.
 */
5263 if (le16_to_cpu(map->layout_map_count) != 2) {
5264 hpsa_turn_off_ioaccel_for_device(dev);
5265 return IO_ACCEL_INELIGIBLE;
5266 }
5267 if (dev->offload_to_mirror)
5268 map_index += le16_to_cpu(map->data_disks_per_row);
5269 dev->offload_to_mirror = !dev->offload_to_mirror;
5270 break;
5271 case HPSA_RAID_ADM:
/*
 * Handles N-way mirrors (R1-ADM) and R10 with a number of drives
 * divisible by 3; anything else is ineligible.
 */
5276 if (le16_to_cpu(map->layout_map_count) != 3) {
5277 hpsa_turn_off_ioaccel_for_device(dev);
5278 return IO_ACCEL_INELIGIBLE;
5279 }
5280
5281 offload_to_mirror = dev->offload_to_mirror;
5282 raid_map_helper(map, offload_to_mirror,
5283 &map_index, &current_group);
5284
5285 offload_to_mirror =
5286 (offload_to_mirror >=
5287 le16_to_cpu(map->layout_map_count) - 1)
5288 ? 0 : offload_to_mirror + 1;
5289 dev->offload_to_mirror = offload_to_mirror;
5290
/*
 * Avoid direct use of dev->offload_to_mirror within this function since
 * multiple threads might simultaneously increment it beyond the range
 * of the layout map count.
 */
5294 break;
5295 case HPSA_RAID_5:
5296 case HPSA_RAID_6:
5297 if (le16_to_cpu(map->layout_map_count) <= 1)
5298 break;
5299
5300
5301 r5or6_blocks_per_row =
5302 le16_to_cpu(map->strip_size) *
5303 le16_to_cpu(map->data_disks_per_row);
5304 if (r5or6_blocks_per_row == 0) {
5305 hpsa_turn_off_ioaccel_for_device(dev);
5306 return IO_ACCEL_INELIGIBLE;
5307 }
5308 stripesize = r5or6_blocks_per_row *
5309 le16_to_cpu(map->layout_map_count);
5310#if BITS_PER_LONG == 32
5311 tmpdiv = first_block;
5312 first_group = do_div(tmpdiv, stripesize);
5313 tmpdiv = first_group;
5314 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5315 first_group = tmpdiv;
5316 tmpdiv = last_block;
5317 last_group = do_div(tmpdiv, stripesize);
5318 tmpdiv = last_group;
5319 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5320 last_group = tmpdiv;
5321#else
5322 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5323 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5324#endif
5325 if (first_group != last_group)
5326 return IO_ACCEL_INELIGIBLE;
5327
5328
5329#if BITS_PER_LONG == 32
5330 tmpdiv = first_block;
5331 (void) do_div(tmpdiv, stripesize);
5332 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5333 tmpdiv = last_block;
5334 (void) do_div(tmpdiv, stripesize);
5335 r5or6_last_row = r0_last_row = tmpdiv;
5336#else
5337 first_row = r5or6_first_row = r0_first_row =
5338 first_block / stripesize;
5339 r5or6_last_row = r0_last_row = last_block / stripesize;
5340#endif
5341 if (r5or6_first_row != r5or6_last_row)
5342 return IO_ACCEL_INELIGIBLE;
5343
5344
5345
5346#if BITS_PER_LONG == 32
5347 tmpdiv = first_block;
5348 first_row_offset = do_div(tmpdiv, stripesize);
5349 tmpdiv = first_row_offset;
5350 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5351 r5or6_first_row_offset = first_row_offset;
5352 tmpdiv = last_block;
5353 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5354 tmpdiv = r5or6_last_row_offset;
5355 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5356 tmpdiv = r5or6_first_row_offset;
5357 (void) do_div(tmpdiv, map->strip_size);
5358 first_column = r5or6_first_column = tmpdiv;
5359 tmpdiv = r5or6_last_row_offset;
5360 (void) do_div(tmpdiv, map->strip_size);
5361 r5or6_last_column = tmpdiv;
5362#else
5363 first_row_offset = r5or6_first_row_offset =
5364 (u32)((first_block % stripesize) %
5365 r5or6_blocks_per_row);
5366
5367 r5or6_last_row_offset =
5368 (u32)((last_block % stripesize) %
5369 r5or6_blocks_per_row);
5370
5371 first_column = r5or6_first_column =
5372 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5373 r5or6_last_column =
5374 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5375#endif
5376 if (r5or6_first_column != r5or6_last_column)
5377 return IO_ACCEL_INELIGIBLE;
5378
5379
5380 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5381 le16_to_cpu(map->row_cnt);
5382
5383 map_index = (first_group *
5384 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5385 (map_row * total_disks_per_row) + first_column;
5386 break;
5387 default:
5388 return IO_ACCEL_INELIGIBLE;
5389 }
5390
5391 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5392 return IO_ACCEL_INELIGIBLE;
5393
5394 c->phys_disk = dev->phys_disk[map_index];
5395 if (!c->phys_disk)
5396 return IO_ACCEL_INELIGIBLE;
5397
5398 disk_handle = dd[map_index].ioaccel_handle;
5399 disk_block = le64_to_cpu(map->disk_starting_blk) +
5400 first_row * le16_to_cpu(map->strip_size) +
5401 (first_row_offset - first_column *
5402 le16_to_cpu(map->strip_size));
5403 disk_block_cnt = block_cnt;
5404
5405
5406 if (map->phys_blk_shift) {
5407 disk_block <<= map->phys_blk_shift;
5408 disk_block_cnt <<= map->phys_blk_shift;
5409 }
5410 BUG_ON(disk_block_cnt > 0xffff);
5411
5412
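/* Build the CDB for the physical disk I/O: 16-byte if the LBA needs more than 32 bits, else 10-byte. */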
5413 if (disk_block > 0xffffffff) {
5414 cdb[0] = is_write ? WRITE_16 : READ_16;
5415 cdb[1] = 0;
5416 cdb[2] = (u8) (disk_block >> 56);
5417 cdb[3] = (u8) (disk_block >> 48);
5418 cdb[4] = (u8) (disk_block >> 40);
5419 cdb[5] = (u8) (disk_block >> 32);
5420 cdb[6] = (u8) (disk_block >> 24);
5421 cdb[7] = (u8) (disk_block >> 16);
5422 cdb[8] = (u8) (disk_block >> 8);
5423 cdb[9] = (u8) (disk_block);
5424 cdb[10] = (u8) (disk_block_cnt >> 24);
5425 cdb[11] = (u8) (disk_block_cnt >> 16);
5426 cdb[12] = (u8) (disk_block_cnt >> 8);
5427 cdb[13] = (u8) (disk_block_cnt);
5428 cdb[14] = 0;
5429 cdb[15] = 0;
5430 cdb_len = 16;
5431 } else {
5432 cdb[0] = is_write ? WRITE_10 : READ_10;
5433 cdb[1] = 0;
5434 cdb[2] = (u8) (disk_block >> 24);
5435 cdb[3] = (u8) (disk_block >> 16);
5436 cdb[4] = (u8) (disk_block >> 8);
5437 cdb[5] = (u8) (disk_block);
5438 cdb[6] = 0;
5439 cdb[7] = (u8) (disk_block_cnt >> 8);
5440 cdb[8] = (u8) (disk_block_cnt);
5441 cdb[9] = 0;
5442 cdb_len = 10;
5443 }
5444 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5445 dev->scsi3addr,
5446 dev->phys_disk[map_index]);
5447}
5448
/*
 * Submit commands down the "normal" RAID stack path.
 */
5454static int hpsa_ciss_submit(struct ctlr_info *h,
5455 struct CommandList *c, struct scsi_cmnd *cmd,
5456 struct hpsa_scsi_dev_t *dev)
5457{
5458 cmd->host_scribble = (unsigned char *) c;
5459 c->cmd_type = CMD_SCSI;
5460 c->scsi_cmd = cmd;
5461 c->Header.ReplyQueue = 0;
5462 memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
5463 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5464
5465
5466
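/* Fill in the request block. */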
5467 c->Request.Timeout = 0;
5468 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5469 c->Request.CDBLen = cmd->cmd_len;
5470 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5471 switch (cmd->sc_data_direction) {
5472 case DMA_TO_DEVICE:
5473 c->Request.type_attr_dir =
5474 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5475 break;
5476 case DMA_FROM_DEVICE:
5477 c->Request.type_attr_dir =
5478 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5479 break;
5480 case DMA_NONE:
5481 c->Request.type_attr_dir =
5482 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5483 break;
5484 case DMA_BIDIRECTIONAL:
5485
5486
5487
5488
5489
5490 c->Request.type_attr_dir =
5491 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5492
5493
5494
5495
5496
5497
5498
5499
5500 break;
5501
5502 default:
5503 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5504 cmd->sc_data_direction);
5505 BUG();
5506 break;
5507 }
5508
5509 if (hpsa_scatter_gather(h, c, cmd) < 0) {
5510 hpsa_cmd_resolve_and_free(h, c);
5511 return SCSI_MLQUEUE_HOST_BUSY;
5512 }
5513
5514 if (dev->in_reset) {
5515 hpsa_cmd_resolve_and_free(h, c);
5516 return SCSI_MLQUEUE_HOST_BUSY;
5517 }
5518
5519 c->device = dev;
5520
5521 enqueue_cmd_and_start_io(h, c);
5522
5523 return 0;
5524}
5525
5526static void hpsa_cmd_init(struct ctlr_info *h, int index,
5527 struct CommandList *c)
5528{
5529 dma_addr_t cmd_dma_handle, err_dma_handle;
5530
5531
5532 memset(c, 0, offsetof(struct CommandList, refcount));
5533 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5534 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5535 c->err_info = h->errinfo_pool + index;
5536 memset(c->err_info, 0, sizeof(*c->err_info));
5537 err_dma_handle = h->errinfo_pool_dhandle
5538 + index * sizeof(*c->err_info);
5539 c->cmdindex = index;
5540 c->busaddr = (u32) cmd_dma_handle;
5541 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5542 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5543 c->h = h;
5544 c->scsi_cmd = SCSI_CMD_IDLE;
5545}
5546
5547static void hpsa_preinitialize_commands(struct ctlr_info *h)
5548{
5549 int i;
5550
5551 for (i = 0; i < h->nr_cmds; i++) {
5552 struct CommandList *c = h->cmd_pool + i;
5553
5554 hpsa_cmd_init(h, i, c);
5555 atomic_set(&c->refcount, 0);
5556 }
5557}
5558
5559static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5560 struct CommandList *c)
5561{
5562 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5563
5564 BUG_ON(c->cmdindex != index);
5565
5566 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5567 memset(c->err_info, 0, sizeof(*c->err_info));
5568 c->busaddr = (u32) cmd_dma_handle;
5569}
5570
5571static int hpsa_ioaccel_submit(struct ctlr_info *h,
5572 struct CommandList *c, struct scsi_cmnd *cmd,
5573 bool retry)
5574{
5575 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5576 int rc = IO_ACCEL_INELIGIBLE;
5577
5578 if (!dev)
5579 return SCSI_MLQUEUE_HOST_BUSY;
5580
5581 if (dev->in_reset)
5582 return SCSI_MLQUEUE_HOST_BUSY;
5583
5584 if (hpsa_simple_mode)
5585 return IO_ACCEL_INELIGIBLE;
5586
5587 cmd->host_scribble = (unsigned char *) c;
5588
5589 if (dev->offload_enabled) {
5590 hpsa_cmd_init(h, c->cmdindex, c);
5591 c->cmd_type = CMD_SCSI;
5592 c->scsi_cmd = cmd;
5593 c->device = dev;
5594 if (retry)
5595 c->retry_pending = true;
5596 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5597 if (rc < 0)
5598 rc = SCSI_MLQUEUE_HOST_BUSY;
5599 } else if (dev->hba_ioaccel_enabled) {
5600 hpsa_cmd_init(h, c->cmdindex, c);
5601 c->cmd_type = CMD_SCSI;
5602 c->scsi_cmd = cmd;
5603 c->device = dev;
5604 if (retry)
5605 c->retry_pending = true;
5606 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5607 if (rc < 0)
5608 rc = SCSI_MLQUEUE_HOST_BUSY;
5609 }
5610 return rc;
5611}
5612
5613static void hpsa_command_resubmit_worker(struct work_struct *work)
5614{
5615 struct scsi_cmnd *cmd;
5616 struct hpsa_scsi_dev_t *dev;
5617 struct CommandList *c = container_of(work, struct CommandList, work);
5618
5619 cmd = c->scsi_cmd;
5620 dev = cmd->device->hostdata;
5621 if (!dev) {
5622 cmd->result = DID_NO_CONNECT << 16;
5623 return hpsa_cmd_free_and_done(c->h, c, cmd);
5624 }
5625
5626 if (dev->in_reset) {
5627 cmd->result = DID_RESET << 16;
5628 return hpsa_cmd_free_and_done(c->h, c, cmd);
5629 }
5630
5631 if (c->cmd_type == CMD_IOACCEL2) {
5632 struct ctlr_info *h = c->h;
5633 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5634 int rc;
5635
5636 if (c2->error_data.serv_response ==
5637 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5638
5639 rc = hpsa_ioaccel_submit(h, c, cmd, true);
5640 if (rc == 0)
5641 return;
5642 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5643
5644
5645
5646
5647
5648 cmd->result = DID_IMM_RETRY << 16;
5649 return hpsa_cmd_free_and_done(h, c, cmd);
5650 }
5651
5652 }
5653 }
5654 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5655
5656
5657
5658
5659
5660
5661
5662
5663 c->retry_pending = true;
5664 if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
5665
5666
5667
5668
5669
5670
5671
5672
5673 cmd->result = DID_IMM_RETRY << 16;
5674 cmd->scsi_done(cmd);
5675 }
5676}
5677
5678
5679static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5680{
5681 struct ctlr_info *h;
5682 struct hpsa_scsi_dev_t *dev;
5683 struct CommandList *c;
5684 int rc = 0;
5685
5686
5687 h = sdev_to_hba(cmd->device);
5688
5689 BUG_ON(cmd->request->tag < 0);
5690
5691 dev = cmd->device->hostdata;
5692 if (!dev) {
5693 cmd->result = DID_NO_CONNECT << 16;
5694 cmd->scsi_done(cmd);
5695 return 0;
5696 }
5697
5698 if (dev->removed) {
5699 cmd->result = DID_NO_CONNECT << 16;
5700 cmd->scsi_done(cmd);
5701 return 0;
5702 }
5703
5704 if (unlikely(lockup_detected(h))) {
5705 cmd->result = DID_NO_CONNECT << 16;
5706 cmd->scsi_done(cmd);
5707 return 0;
5708 }
5709
5710 if (dev->in_reset)
5711 return SCSI_MLQUEUE_DEVICE_BUSY;
5712
5713 c = cmd_tagged_alloc(h, cmd);
5714 if (c == NULL)
5715 return SCSI_MLQUEUE_DEVICE_BUSY;
5716
5717
5718
5719
5720
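/* Clear out the result field; the SCSI midlayer does not reset it between retries. */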
5721 cmd->result = 0;
5722
5723
5724
5725
5726
5727
5728
5729
5730
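/* Attempt the I/O accelerator path first; retries and passthrough requests always go down the normal RAID path. */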
5731 if (likely(cmd->retries == 0 &&
5732 !blk_rq_is_passthrough(cmd->request) &&
5733 h->acciopath_status)) {
5734
5735 rc = hpsa_ioaccel_submit(h, c, cmd, false);
5736 if (rc == 0)
5737 return 0;
5738 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5739 hpsa_cmd_resolve_and_free(h, c);
5740 return SCSI_MLQUEUE_HOST_BUSY;
5741 }
5742 }
5743 return hpsa_ciss_submit(h, c, cmd, dev);
5744}
5745
5746static void hpsa_scan_complete(struct ctlr_info *h)
5747{
5748 unsigned long flags;
5749
5750 spin_lock_irqsave(&h->scan_lock, flags);
5751 h->scan_finished = 1;
5752 wake_up(&h->scan_wait_queue);
5753 spin_unlock_irqrestore(&h->scan_lock, flags);
5754}
5755
5756static void hpsa_scan_start(struct Scsi_Host *sh)
5757{
5758 struct ctlr_info *h = shost_to_hba(sh);
5759 unsigned long flags;
5760
/*
 * Don't let rescans be initiated on a controller known to be locked up.
 * If the controller locks up *during* a rescan, that thread is probably
 * hosed, but at least we can prevent new rescan threads from piling up
 * on a locked-up controller.
 */
5767 if (unlikely(lockup_detected(h)))
5768 return hpsa_scan_complete(h);
5769
5770
5771
5772
5773 spin_lock_irqsave(&h->scan_lock, flags);
5774 if (h->scan_waiting) {
5775 spin_unlock_irqrestore(&h->scan_lock, flags);
5776 return;
5777 }
5778
5779 spin_unlock_irqrestore(&h->scan_lock, flags);
5780
5781
5782 while (1) {
5783 spin_lock_irqsave(&h->scan_lock, flags);
5784 if (h->scan_finished)
5785 break;
5786 h->scan_waiting = 1;
5787 spin_unlock_irqrestore(&h->scan_lock, flags);
5788 wait_event(h->scan_wait_queue, h->scan_finished);
5789
5790
5791
5792
5793
5794 }
5795 h->scan_finished = 0;
5796 h->scan_waiting = 0;
5797 spin_unlock_irqrestore(&h->scan_lock, flags);
5798
5799 if (unlikely(lockup_detected(h)))
5800 return hpsa_scan_complete(h);
5801
5802
5803
5804
5805 spin_lock_irqsave(&h->reset_lock, flags);
5806 if (h->reset_in_progress) {
5807 h->drv_req_rescan = 1;
5808 spin_unlock_irqrestore(&h->reset_lock, flags);
5809 hpsa_scan_complete(h);
5810 return;
5811 }
5812 spin_unlock_irqrestore(&h->reset_lock, flags);
5813
5814 hpsa_update_scsi_devices(h);
5815
5816 hpsa_scan_complete(h);
5817}
5818
5819static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5820{
5821 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5822
5823 if (!logical_drive)
5824 return -ENODEV;
5825
5826 if (qdepth < 1)
5827 qdepth = 1;
5828 else if (qdepth > logical_drive->queue_depth)
5829 qdepth = logical_drive->queue_depth;
5830
5831 return scsi_change_queue_depth(sdev, qdepth);
5832}
5833
5834static int hpsa_scan_finished(struct Scsi_Host *sh,
5835 unsigned long elapsed_time)
5836{
5837 struct ctlr_info *h = shost_to_hba(sh);
5838 unsigned long flags;
5839 int finished;
5840
5841 spin_lock_irqsave(&h->scan_lock, flags);
5842 finished = h->scan_finished;
5843 spin_unlock_irqrestore(&h->scan_lock, flags);
5844 return finished;
5845}
5846
5847static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5848{
5849 struct Scsi_Host *sh;
5850
5851 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5852 if (sh == NULL) {
5853 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5854 return -ENOMEM;
5855 }
5856
5857 sh->io_port = 0;
5858 sh->n_io_port = 0;
5859 sh->this_id = -1;
5860 sh->max_channel = 3;
5861 sh->max_cmd_len = MAX_COMMAND_SIZE;
5862 sh->max_lun = HPSA_MAX_LUN;
5863 sh->max_id = HPSA_MAX_LUN;
5864 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5865 sh->cmd_per_lun = sh->can_queue;
5866 sh->sg_tablesize = h->maxsgentries;
5867 sh->transportt = hpsa_sas_transport_template;
5868 sh->hostdata[0] = (unsigned long) h;
5869 sh->irq = pci_irq_vector(h->pdev, 0);
5870 sh->unique_id = sh->irq;
5871
5872 h->scsi_host = sh;
5873 return 0;
5874}
5875
5876static int hpsa_scsi_add_host(struct ctlr_info *h)
5877{
5878 int rv;
5879
5880 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5881 if (rv) {
5882 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5883 return rv;
5884 }
5885 scsi_scan_host(h->scsi_host);
5886 return 0;
5887}
5888
5889
5890
5891
5892
5893
5894
5895static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5896{
5897 int idx = scmd->request->tag;
5898
5899 if (idx < 0)
5900 return idx;
5901
5902
5903 return idx + HPSA_NRESERVED_CMDS;
5904}
5905
5906
5907
5908
5909
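/* Send a TEST UNIT READY to the LUN using the specified reply queue; returns 0 when the unit is ready. */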
5910static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5911 struct CommandList *c, unsigned char lunaddr[],
5912 int reply_queue)
5913{
5914 int rc;
5915
5916
5917 (void) fill_cmd(c, TEST_UNIT_READY, h,
5918 NULL, 0, 0, lunaddr, TYPE_CMD);
5919 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5920 if (rc)
5921 return rc;
5922
5923
5924
5925 if (c->err_info->CommandStatus == CMD_SUCCESS)
5926 return 0;
5927
5928
5929
5930
5931
5932
5933 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5934 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5935 (c->err_info->SenseInfo[2] == NO_SENSE ||
5936 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5937 return 0;
5938
5939 return 1;
5940}
5941
5942
5943
5944
5945
5946static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5947 struct CommandList *c,
5948 unsigned char lunaddr[], int reply_queue)
5949{
5950 int rc;
5951 int count = 0;
5952 int waittime = 1;
5953
5954
5955 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5956
5957
5958
5959
5960
5961 msleep(1000 * waittime);
5962
5963 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5964 if (!rc)
5965 break;
5966
5967
5968 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5969 waittime *= 2;
5970
5971 dev_warn(&h->pdev->dev,
5972 "waiting %d secs for device to become ready.\n",
5973 waittime);
5974 }
5975
5976 return rc;
5977}
5978
5979static int wait_for_device_to_become_ready(struct ctlr_info *h,
5980 unsigned char lunaddr[],
5981 int reply_queue)
5982{
5983 int first_queue;
5984 int last_queue;
5985 int rq;
5986 int rc = 0;
5987 struct CommandList *c;
5988
5989 c = cmd_alloc(h);
5990
5991
5992
5993
5994
5995
5996 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5997 first_queue = 0;
5998 last_queue = h->nreply_queues - 1;
5999 } else {
6000 first_queue = reply_queue;
6001 last_queue = reply_queue;
6002 }
6003
6004 for (rq = first_queue; rq <= last_queue; rq++) {
6005 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
6006 if (rc)
6007 break;
6008 }
6009
6010 if (rc)
6011 dev_warn(&h->pdev->dev, "giving up on device.\n");
6012 else
6013 dev_warn(&h->pdev->dev, "device is ready.\n");
6014
6015 cmd_free(h, c);
6016 return rc;
6017}
6018
6019
6020
6021
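/* SCSI error handler: reset the device that the failed command was sent to. */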
6022static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
6023{
6024 int rc = SUCCESS;
6025 int i;
6026 struct ctlr_info *h;
6027 struct hpsa_scsi_dev_t *dev = NULL;
6028 u8 reset_type;
6029 char msg[48];
6030 unsigned long flags;
6031
6032
6033 h = sdev_to_hba(scsicmd->device);
6034 if (h == NULL)
6035 return FAILED;
6036
6037 spin_lock_irqsave(&h->reset_lock, flags);
6038 h->reset_in_progress = 1;
6039 spin_unlock_irqrestore(&h->reset_lock, flags);
6040
6041 if (lockup_detected(h)) {
6042 rc = FAILED;
6043 goto return_reset_status;
6044 }
6045
6046 dev = scsicmd->device->hostdata;
6047 if (!dev) {
6048 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
6049 rc = FAILED;
6050 goto return_reset_status;
6051 }
6052
6053 if (dev->devtype == TYPE_ENCLOSURE) {
6054 rc = SUCCESS;
6055 goto return_reset_status;
6056 }
6057
6058
6059 if (lockup_detected(h)) {
6060 snprintf(msg, sizeof(msg),
6061 "cmd %d RESET FAILED, lockup detected",
6062 hpsa_get_cmd_index(scsicmd));
6063 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6064 rc = FAILED;
6065 goto return_reset_status;
6066 }
6067
6068
6069 if (detect_controller_lockup(h)) {
6070 snprintf(msg, sizeof(msg),
6071 "cmd %d RESET FAILED, new lockup detected",
6072 hpsa_get_cmd_index(scsicmd));
6073 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6074 rc = FAILED;
6075 goto return_reset_status;
6076 }
6077
6078
6079 if (is_hba_lunid(dev->scsi3addr)) {
6080 rc = SUCCESS;
6081 goto return_reset_status;
6082 }
6083
6084 if (is_logical_dev_addr_mode(dev->scsi3addr))
6085 reset_type = HPSA_DEVICE_RESET_MSG;
6086 else
6087 reset_type = HPSA_PHYS_TARGET_RESET;
6088
6089 sprintf(msg, "resetting %s",
6090 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
6091 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6092
6093
6094
6095
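/* Block new I/O to the device and wait up to 10 seconds for outstanding commands to drain. */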
6096 dev->in_reset = true;
6097 for (i = 0; i < 10; i++) {
6098 if (atomic_read(&dev->commands_outstanding) > 0)
6099 msleep(1000);
6100 else
6101 break;
6102 }
6103
6104
6105 rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
6106 if (rc == 0)
6107 rc = SUCCESS;
6108 else
6109 rc = FAILED;
6110
6111 sprintf(msg, "reset %s %s",
6112 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
6113 rc == SUCCESS ? "completed successfully" : "failed");
6114 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
6115
6116return_reset_status:
6117 spin_lock_irqsave(&h->reset_lock, flags);
6118 h->reset_in_progress = 0;
6119 if (dev)
6120 dev->in_reset = false;
6121 spin_unlock_irqrestore(&h->reset_lock, flags);
6122 return rc;
6123}
6124
6125
6126
6127
6128
6129
6130
6131
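/*
 * For commands that come from the SCSI midlayer, the command block is picked
 * out of the pool using the block layer tag; no separate allocation is needed.
 */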
6132static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6133 struct scsi_cmnd *scmd)
6134{
6135 int idx = hpsa_get_cmd_index(scmd);
6136 struct CommandList *c = h->cmd_pool + idx;
6137
6138 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6139 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6140 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6141
6142
6143
6144 BUG();
6145 }
6146
6147 if (unlikely(!hpsa_is_cmd_idle(c))) {
6148
6149
6150
6151
6152
6153
6154 if (idx != h->last_collision_tag) {
6155 dev_warn(&h->pdev->dev,
6156 "%s: tag collision (tag=%d)\n", __func__, idx);
6157 if (scmd)
6158 scsi_print_command(scmd);
6159 h->last_collision_tag = idx;
6160 }
6161 return NULL;
6162 }
6163
6164 atomic_inc(&c->refcount);
6165 hpsa_cmd_partial_init(h, idx, c);
6166
6167
6168
6169
6170
6171 c->retry_pending = false;
6172
6173 return c;
6174}
6175
6176static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6177{
6178
6179
6180
6181
6182 (void)atomic_dec(&c->refcount);
6183}
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
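/*
 * Allocate a command from the small pool of slots reserved for driver-internal
 * use (ioctls, rescans, resets); midlayer commands use cmd_tagged_alloc()
 * instead.  This scans the reserved bitmap and loops until a free slot is found.
 */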
6194static struct CommandList *cmd_alloc(struct ctlr_info *h)
6195{
6196 struct CommandList *c;
6197 int refcount, i;
6198 int offset = 0;
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214
6215
6216
6217
6218
6219 for (;;) {
6220 i = find_next_zero_bit(h->cmd_pool_bits,
6221 HPSA_NRESERVED_CMDS,
6222 offset);
6223 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6224 offset = 0;
6225 continue;
6226 }
6227 c = h->cmd_pool + i;
6228 refcount = atomic_inc_return(&c->refcount);
6229 if (unlikely(refcount > 1)) {
6230 cmd_free(h, c);
6231 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6232 continue;
6233 }
6234 set_bit(i & (BITS_PER_LONG - 1),
6235 h->cmd_pool_bits + (i / BITS_PER_LONG));
6236 break;
6237 }
6238 hpsa_cmd_partial_init(h, i, c);
6239 c->device = NULL;
6240
6241
6242
6243
6244
6245 c->retry_pending = false;
6246
6247 return c;
6248}
6249
6250
6251
6252
6253
6254
6255
6256static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6257{
6258 if (atomic_dec_and_test(&c->refcount)) {
6259 int i;
6260
6261 i = c - h->cmd_pool;
6262 clear_bit(i & (BITS_PER_LONG - 1),
6263 h->cmd_pool_bits + (i / BITS_PER_LONG));
6264 }
6265}
6266
6267#ifdef CONFIG_COMPAT
6268
6269static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
6270 void __user *arg)
6271{
6272 struct ctlr_info *h = sdev_to_hba(dev);
6273 IOCTL32_Command_struct __user *arg32 = arg;
6274 IOCTL_Command_struct arg64;
6275 int err;
6276 u32 cp;
6277
6278 if (!arg)
6279 return -EINVAL;
6280
6281 memset(&arg64, 0, sizeof(arg64));
6282 if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
6283 return -EFAULT;
6284 if (get_user(cp, &arg32->buf))
6285 return -EFAULT;
6286 arg64.buf = compat_ptr(cp);
6287
6288 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6289 return -EAGAIN;
6290 err = hpsa_passthru_ioctl(h, &arg64);
6291 atomic_inc(&h->passthru_cmds_avail);
6292 if (err)
6293 return err;
6294 if (copy_to_user(&arg32->error_info, &arg64.error_info,
6295 sizeof(arg32->error_info)))
6296 return -EFAULT;
6297 return 0;
6298}
6299
6300static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6301 unsigned int cmd, void __user *arg)
6302{
6303 struct ctlr_info *h = sdev_to_hba(dev);
6304 BIG_IOCTL32_Command_struct __user *arg32 = arg;
6305 BIG_IOCTL_Command_struct arg64;
6306 int err;
6307 u32 cp;
6308
6309 if (!arg)
6310 return -EINVAL;
6311 memset(&arg64, 0, sizeof(arg64));
6312 if (copy_from_user(&arg64, arg32,
6313 offsetof(BIG_IOCTL32_Command_struct, buf)))
6314 return -EFAULT;
6315 if (get_user(cp, &arg32->buf))
6316 return -EFAULT;
6317 arg64.buf = compat_ptr(cp);
6318
6319 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6320 return -EAGAIN;
6321 err = hpsa_big_passthru_ioctl(h, &arg64);
6322 atomic_inc(&h->passthru_cmds_avail);
6323 if (err)
6324 return err;
6325 if (copy_to_user(&arg32->error_info, &arg64.error_info,
6326 sizeof(arg32->error_info)))
6327 return -EFAULT;
6328 return 0;
6329}
6330
6331static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
6332 void __user *arg)
6333{
6334 switch (cmd) {
6335 case CCISS_GETPCIINFO:
6336 case CCISS_GETINTINFO:
6337 case CCISS_SETINTINFO:
6338 case CCISS_GETNODENAME:
6339 case CCISS_SETNODENAME:
6340 case CCISS_GETHEARTBEAT:
6341 case CCISS_GETBUSTYPES:
6342 case CCISS_GETFIRMVER:
6343 case CCISS_GETDRIVVER:
6344 case CCISS_REVALIDVOLS:
6345 case CCISS_DEREGDISK:
6346 case CCISS_REGNEWDISK:
6347 case CCISS_REGNEWD:
6348 case CCISS_RESCANDISK:
6349 case CCISS_GETLUNINFO:
6350 return hpsa_ioctl(dev, cmd, arg);
6351
6352 case CCISS_PASSTHRU32:
6353 return hpsa_ioctl32_passthru(dev, cmd, arg);
6354 case CCISS_BIG_PASSTHRU32:
6355 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6356
6357 default:
6358 return -ENOIOCTLCMD;
6359 }
6360}
6361#endif
6362
6363static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6364{
6365 struct hpsa_pci_info pciinfo;
6366
6367 if (!argp)
6368 return -EINVAL;
6369 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6370 pciinfo.bus = h->pdev->bus->number;
6371 pciinfo.dev_fn = h->pdev->devfn;
6372 pciinfo.board_id = h->board_id;
6373 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6374 return -EFAULT;
6375 return 0;
6376}
6377
6378static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6379{
6380 DriverVer_type DriverVer;
6381 unsigned char vmaj, vmin, vsubmin;
6382 int rc;
6383
6384 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6385 &vmaj, &vmin, &vsubmin);
6386 if (rc != 3) {
6387 dev_info(&h->pdev->dev, "driver version string '%s' "
6388 "unrecognized.", HPSA_DRIVER_VERSION);
6389 vmaj = 0;
6390 vmin = 0;
6391 vsubmin = 0;
6392 }
6393 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6394 if (!argp)
6395 return -EINVAL;
6396 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6397 return -EFAULT;
6398 return 0;
6399}
6400
6401static int hpsa_passthru_ioctl(struct ctlr_info *h,
6402 IOCTL_Command_struct *iocommand)
6403{
6404 struct CommandList *c;
6405 char *buff = NULL;
6406 u64 temp64;
6407 int rc = 0;
6408
6409 if (!capable(CAP_SYS_RAWIO))
6410 return -EPERM;
6411 if ((iocommand->buf_size < 1) &&
6412 (iocommand->Request.Type.Direction != XFER_NONE)) {
6413 return -EINVAL;
6414 }
6415 if (iocommand->buf_size > 0) {
6416 buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
6417 if (buff == NULL)
6418 return -ENOMEM;
6419 if (iocommand->Request.Type.Direction & XFER_WRITE) {
6420
6421 if (copy_from_user(buff, iocommand->buf,
6422 iocommand->buf_size)) {
6423 rc = -EFAULT;
6424 goto out_kfree;
6425 }
6426 } else {
6427 memset(buff, 0, iocommand->buf_size);
6428 }
6429 }
6430 c = cmd_alloc(h);
6431
6432
6433 c->cmd_type = CMD_IOCTL_PEND;
6434 c->scsi_cmd = SCSI_CMD_BUSY;
6435
6436 c->Header.ReplyQueue = 0;
6437 if (iocommand->buf_size > 0) {
6438 c->Header.SGList = 1;
6439 c->Header.SGTotal = cpu_to_le16(1);
6440 } else {
6441 c->Header.SGList = 0;
6442 c->Header.SGTotal = cpu_to_le16(0);
6443 }
6444 memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
6445
6446
6447 memcpy(&c->Request, &iocommand->Request,
6448 sizeof(c->Request));
6449
6450
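/* Map the kernel buffer for DMA and describe it with a single SG entry. */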
6451 if (iocommand->buf_size > 0) {
6452 temp64 = dma_map_single(&h->pdev->dev, buff,
6453 iocommand->buf_size, DMA_BIDIRECTIONAL);
6454 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6455 c->SG[0].Addr = cpu_to_le64(0);
6456 c->SG[0].Len = cpu_to_le32(0);
6457 rc = -ENOMEM;
6458 goto out;
6459 }
6460 c->SG[0].Addr = cpu_to_le64(temp64);
6461 c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
6462 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST);
6463 }
6464 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6465 NO_TIMEOUT);
6466 if (iocommand->buf_size > 0)
6467 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6468 check_ioctl_unit_attention(h, c);
6469 if (rc) {
6470 rc = -EIO;
6471 goto out;
6472 }
6473
6474
6475 memcpy(&iocommand->error_info, c->err_info,
6476 sizeof(iocommand->error_info));
6477 if ((iocommand->Request.Type.Direction & XFER_READ) &&
6478 iocommand->buf_size > 0) {
6479
6480 if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
6481 rc = -EFAULT;
6482 goto out;
6483 }
6484 }
6485out:
6486 cmd_free(h, c);
6487out_kfree:
6488 kfree(buff);
6489 return rc;
6490}
6491
6492static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
6493 BIG_IOCTL_Command_struct *ioc)
6494{
6495 struct CommandList *c;
6496 unsigned char **buff = NULL;
6497 int *buff_size = NULL;
6498 u64 temp64;
6499 BYTE sg_used = 0;
6500 int status = 0;
6501 u32 left;
6502 u32 sz;
6503 BYTE __user *data_ptr;
6504
6505 if (!capable(CAP_SYS_RAWIO))
6506 return -EPERM;
6507
6508 if ((ioc->buf_size < 1) &&
6509 (ioc->Request.Type.Direction != XFER_NONE))
6510 return -EINVAL;
6511
6512 if (ioc->malloc_size > MAX_KMALLOC_SIZE)
6513 return -EINVAL;
6514 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
6515 return -EINVAL;
6516 buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6517 if (!buff) {
6518 status = -ENOMEM;
6519 goto cleanup1;
6520 }
6521 buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6522 if (!buff_size) {
6523 status = -ENOMEM;
6524 goto cleanup1;
6525 }
6526 left = ioc->buf_size;
6527 data_ptr = ioc->buf;
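 /*
 * Split the transfer into malloc_size-sized chunks, copying in the
 * data for writes as we go.
 */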
6528 while (left) {
6529 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6530 buff_size[sg_used] = sz;
6531 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6532 if (buff[sg_used] == NULL) {
6533 status = -ENOMEM;
6534 goto cleanup1;
6535 }
6536 if (ioc->Request.Type.Direction & XFER_WRITE) {
6537 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6538 status = -EFAULT;
6539 goto cleanup1;
6540 }
6541 } else
6542 memset(buff[sg_used], 0, sz);
6543 left -= sz;
6544 data_ptr += sz;
6545 sg_used++;
6546 }
6547 c = cmd_alloc(h);
6548
6549 c->cmd_type = CMD_IOCTL_PEND;
6550 c->scsi_cmd = SCSI_CMD_BUSY;
6551 c->Header.ReplyQueue = 0;
6552 c->Header.SGList = (u8) sg_used;
6553 c->Header.SGTotal = cpu_to_le16(sg_used);
6554 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6555 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6556 if (ioc->buf_size > 0) {
6557 int i;
6558 for (i = 0; i < sg_used; i++) {
6559 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6560 buff_size[i], DMA_BIDIRECTIONAL);
6561 if (dma_mapping_error(&h->pdev->dev,
6562 (dma_addr_t) temp64)) {
6563 c->SG[i].Addr = cpu_to_le64(0);
6564 c->SG[i].Len = cpu_to_le32(0);
6565 hpsa_pci_unmap(h->pdev, c, i,
6566 DMA_BIDIRECTIONAL);
6567 status = -ENOMEM;
6568 goto cleanup0;
6569 }
6570 c->SG[i].Addr = cpu_to_le64(temp64);
6571 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6572 c->SG[i].Ext = cpu_to_le32(0);
6573 }
6574 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6575 }
6576 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6577 NO_TIMEOUT);
6578 if (sg_used)
6579 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6580 check_ioctl_unit_attention(h, c);
6581 if (status) {
6582 status = -EIO;
6583 goto cleanup0;
6584 }
6585
6586
6587 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6588 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6589 int i;
6590
6591
6592 BYTE __user *ptr = ioc->buf;
6593 for (i = 0; i < sg_used; i++) {
6594 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6595 status = -EFAULT;
6596 goto cleanup0;
6597 }
6598 ptr += buff_size[i];
6599 }
6600 }
6601 status = 0;
6602cleanup0:
6603 cmd_free(h, c);
6604cleanup1:
6605 if (buff) {
6606 int i;
6607
6608 for (i = 0; i < sg_used; i++)
6609 kfree(buff[i]);
6610 kfree(buff);
6611 }
6612 kfree(buff_size);
6613 return status;
6614}
6615
6616static void check_ioctl_unit_attention(struct ctlr_info *h,
6617 struct CommandList *c)
6618{
6619 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6620 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6621 (void) check_for_unit_attention(h, c);
6622}
6623
6624
6625
6626
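/*
 * ioctl entry point: dispatch the CCISS compatibility ioctls to their
 * handlers, limiting the number of concurrent pass-through commands.
 */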
6627static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
6628 void __user *argp)
6629{
6630 struct ctlr_info *h = sdev_to_hba(dev);
6631 int rc;
6632
6633 switch (cmd) {
6634 case CCISS_DEREGDISK:
6635 case CCISS_REGNEWDISK:
6636 case CCISS_REGNEWD:
6637 hpsa_scan_start(h->scsi_host);
6638 return 0;
6639 case CCISS_GETPCIINFO:
6640 return hpsa_getpciinfo_ioctl(h, argp);
6641 case CCISS_GETDRIVVER:
6642 return hpsa_getdrivver_ioctl(h, argp);
6643 case CCISS_PASSTHRU: {
6644 IOCTL_Command_struct iocommand;
6645
6646 if (!argp)
6647 return -EINVAL;
6648 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6649 return -EFAULT;
6650 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6651 return -EAGAIN;
6652 rc = hpsa_passthru_ioctl(h, &iocommand);
6653 atomic_inc(&h->passthru_cmds_avail);
6654 if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
6655 rc = -EFAULT;
6656 return rc;
6657 }
6658 case CCISS_BIG_PASSTHRU: {
6659 BIG_IOCTL_Command_struct ioc;
6660 if (!argp)
6661 return -EINVAL;
6662 if (copy_from_user(&ioc, argp, sizeof(ioc)))
6663 return -EFAULT;
6664 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6665 return -EAGAIN;
6666 rc = hpsa_big_passthru_ioctl(h, &ioc);
6667 atomic_inc(&h->passthru_cmds_avail);
6668 if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
6669 rc = -EFAULT;
6670 return rc;
6671 }
6672 default:
6673 return -ENOTTY;
6674 }
6675}
6676
6677static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
6678{
6679 struct CommandList *c;
6680
6681 c = cmd_alloc(h);
6682
6683
6684 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6685 RAID_CTLR_LUNID, TYPE_MSG);
6686 c->Request.CDB[1] = reset_type;
6687 c->waiting = NULL;
6688 enqueue_cmd_and_start_io(h, c);
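 /*
 * Don't wait for completion and don't free the command: the controller
 * is about to reset and everything is re-initialized afterwards, so the
 * command is deliberately abandoned.
 */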
6689
6690
6691
6692
6693 return;
6694}
6695
6696static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6697 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6698 int cmd_type)
6699{
6700 enum dma_data_direction dir = DMA_NONE;
6701
6702 c->cmd_type = CMD_IOCTL_PEND;
6703 c->scsi_cmd = SCSI_CMD_BUSY;
6704 c->Header.ReplyQueue = 0;
6705 if (buff != NULL && size > 0) {
6706 c->Header.SGList = 1;
6707 c->Header.SGTotal = cpu_to_le16(1);
6708 } else {
6709 c->Header.SGList = 0;
6710 c->Header.SGTotal = cpu_to_le16(0);
6711 }
6712 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6713
6714 if (cmd_type == TYPE_CMD) {
6715 switch (cmd) {
6716 case HPSA_INQUIRY:
6717
6718 if (page_code & VPD_PAGE) {
6719 c->Request.CDB[1] = 0x01;
6720 c->Request.CDB[2] = (page_code & 0xff);
6721 }
6722 c->Request.CDBLen = 6;
6723 c->Request.type_attr_dir =
6724 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6725 c->Request.Timeout = 0;
6726 c->Request.CDB[0] = HPSA_INQUIRY;
6727 c->Request.CDB[4] = size & 0xFF;
6728 break;
6729 case RECEIVE_DIAGNOSTIC:
6730 c->Request.CDBLen = 6;
6731 c->Request.type_attr_dir =
6732 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6733 c->Request.Timeout = 0;
6734 c->Request.CDB[0] = cmd;
6735 c->Request.CDB[1] = 1;
6736 c->Request.CDB[2] = 1;
6737 c->Request.CDB[3] = (size >> 8) & 0xFF;
6738 c->Request.CDB[4] = size & 0xFF;
6739 break;
6740 case HPSA_REPORT_LOG:
6741 case HPSA_REPORT_PHYS:
6742
6743
6744
6745 c->Request.CDBLen = 12;
6746 c->Request.type_attr_dir =
6747 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6748 c->Request.Timeout = 0;
6749 c->Request.CDB[0] = cmd;
6750 c->Request.CDB[6] = (size >> 24) & 0xFF;
6751 c->Request.CDB[7] = (size >> 16) & 0xFF;
6752 c->Request.CDB[8] = (size >> 8) & 0xFF;
6753 c->Request.CDB[9] = size & 0xFF;
6754 break;
6755 case BMIC_SENSE_DIAG_OPTIONS:
6756 c->Request.CDBLen = 16;
6757 c->Request.type_attr_dir =
6758 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6759 c->Request.Timeout = 0;
6760
6761 c->Request.CDB[0] = BMIC_READ;
6762 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6763 break;
6764 case BMIC_SET_DIAG_OPTIONS:
6765 c->Request.CDBLen = 16;
6766 c->Request.type_attr_dir =
6767 TYPE_ATTR_DIR(cmd_type,
6768 ATTR_SIMPLE, XFER_WRITE);
6769 c->Request.Timeout = 0;
6770 c->Request.CDB[0] = BMIC_WRITE;
6771 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6772 break;
6773 case HPSA_CACHE_FLUSH:
6774 c->Request.CDBLen = 12;
6775 c->Request.type_attr_dir =
6776 TYPE_ATTR_DIR(cmd_type,
6777 ATTR_SIMPLE, XFER_WRITE);
6778 c->Request.Timeout = 0;
6779 c->Request.CDB[0] = BMIC_WRITE;
6780 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6781 c->Request.CDB[7] = (size >> 8) & 0xFF;
6782 c->Request.CDB[8] = size & 0xFF;
6783 break;
6784 case TEST_UNIT_READY:
6785 c->Request.CDBLen = 6;
6786 c->Request.type_attr_dir =
6787 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6788 c->Request.Timeout = 0;
6789 break;
6790 case HPSA_GET_RAID_MAP:
6791 c->Request.CDBLen = 12;
6792 c->Request.type_attr_dir =
6793 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6794 c->Request.Timeout = 0;
6795 c->Request.CDB[0] = HPSA_CISS_READ;
6796 c->Request.CDB[1] = cmd;
6797 c->Request.CDB[6] = (size >> 24) & 0xFF;
6798 c->Request.CDB[7] = (size >> 16) & 0xFF;
6799 c->Request.CDB[8] = (size >> 8) & 0xFF;
6800 c->Request.CDB[9] = size & 0xFF;
6801 break;
6802 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6803 c->Request.CDBLen = 10;
6804 c->Request.type_attr_dir =
6805 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6806 c->Request.Timeout = 0;
6807 c->Request.CDB[0] = BMIC_READ;
6808 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6809 c->Request.CDB[7] = (size >> 16) & 0xFF;
6810 c->Request.CDB[8] = (size >> 8) & 0xFF;
6811 break;
6812 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6813 c->Request.CDBLen = 10;
6814 c->Request.type_attr_dir =
6815 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6816 c->Request.Timeout = 0;
6817 c->Request.CDB[0] = BMIC_READ;
6818 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6819 c->Request.CDB[7] = (size >> 16) & 0xFF;
6820 c->Request.CDB[8] = (size >> 8) & 0xFF;
6821 break;
6822 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6823 c->Request.CDBLen = 10;
6824 c->Request.type_attr_dir =
6825 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6826 c->Request.Timeout = 0;
6827 c->Request.CDB[0] = BMIC_READ;
6828 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6829 c->Request.CDB[7] = (size >> 16) & 0xFF;
6830 c->Request.CDB[8] = (size >> 8) & 0xFF;
6831 break;
6832 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6833 c->Request.CDBLen = 10;
6834 c->Request.type_attr_dir =
6835 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6836 c->Request.Timeout = 0;
6837 c->Request.CDB[0] = BMIC_READ;
6838 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6839 c->Request.CDB[7] = (size >> 16) & 0xFF;
6840 c->Request.CDB[8] = (size >> 8) & 0xFF;
6841 break;
6842 case BMIC_IDENTIFY_CONTROLLER:
6843 c->Request.CDBLen = 10;
6844 c->Request.type_attr_dir =
6845 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6846 c->Request.Timeout = 0;
6847 c->Request.CDB[0] = BMIC_READ;
6848 c->Request.CDB[1] = 0;
6849 c->Request.CDB[2] = 0;
6850 c->Request.CDB[3] = 0;
6851 c->Request.CDB[4] = 0;
6852 c->Request.CDB[5] = 0;
6853 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6854 c->Request.CDB[7] = (size >> 16) & 0xFF;
6855 c->Request.CDB[8] = (size >> 8) & 0xFF;
6856 c->Request.CDB[9] = 0;
6857 break;
6858 default:
6859 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6860 BUG();
6861 }
6862 } else if (cmd_type == TYPE_MSG) {
6863 switch (cmd) {
6864
6865 case HPSA_PHYS_TARGET_RESET:
6866 c->Request.CDBLen = 16;
6867 c->Request.type_attr_dir =
6868 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6869 c->Request.Timeout = 0;
6870 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6871 c->Request.CDB[0] = HPSA_RESET;
6872 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6873
6874 c->Request.CDB[4] = 0x00;
6875 c->Request.CDB[5] = 0x00;
6876 c->Request.CDB[6] = 0x00;
6877 c->Request.CDB[7] = 0x00;
6878 break;
6879 case HPSA_DEVICE_RESET_MSG:
6880 c->Request.CDBLen = 16;
6881 c->Request.type_attr_dir =
6882 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6883 c->Request.Timeout = 0;
6884 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6885 c->Request.CDB[0] = cmd;
6886 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6887
6888
6889 c->Request.CDB[4] = 0x00;
6890 c->Request.CDB[5] = 0x00;
6891 c->Request.CDB[6] = 0x00;
6892 c->Request.CDB[7] = 0x00;
6893 break;
6894 default:
6895 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6896 cmd);
6897 BUG();
6898 }
6899 } else {
6900 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6901 BUG();
6902 }
6903
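 /* Translate the request's transfer direction into a DMA mapping direction. */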
6904 switch (GET_DIR(c->Request.type_attr_dir)) {
6905 case XFER_READ:
6906 dir = DMA_FROM_DEVICE;
6907 break;
6908 case XFER_WRITE:
6909 dir = DMA_TO_DEVICE;
6910 break;
6911 case XFER_NONE:
6912 dir = DMA_NONE;
6913 break;
6914 default:
6915 dir = DMA_BIDIRECTIONAL;
6916 }
6917 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6918 return -1;
6919 return 0;
6920}
6921
6922
6923
6924
6925static void __iomem *remap_pci_mem(ulong base, ulong size)
6926{
6927 ulong page_base = ((ulong) base) & PAGE_MASK;
6928 ulong page_offs = ((ulong) base) - page_base;
6929 void __iomem *page_remapped = ioremap(page_base,
6930 page_offs + size);
6931
6932 return page_remapped ? (page_remapped + page_offs) : NULL;
6933}
6934
6935static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6936{
6937 return h->access.command_completed(h, q);
6938}
6939
6940static inline bool interrupt_pending(struct ctlr_info *h)
6941{
6942 return h->access.intr_pending(h);
6943}
6944
6945static inline long interrupt_not_for_us(struct ctlr_info *h)
6946{
6947 return (h->access.intr_pending(h) == 0) ||
6948 (h->interrupts_enabled == 0);
6949}
6950
6951static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6952 u32 raw_tag)
6953{
6954 if (unlikely(tag_index >= h->nr_cmds)) {
6955 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6956 return 1;
6957 }
6958 return 0;
6959}
6960
6961static inline void finish_cmd(struct CommandList *c)
6962{
6963 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6964 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6965 || c->cmd_type == CMD_IOACCEL2))
6966 complete_scsi_command(c);
6967 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6968 complete(c->waiting);
6969}
6970
6971
6972static inline void process_indexed_cmd(struct ctlr_info *h,
6973 u32 raw_tag)
6974{
6975 u32 tag_index;
6976 struct CommandList *c;
6977
6978 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6979 if (!bad_tag(h, tag_index, raw_tag)) {
6980 c = h->cmd_pool + tag_index;
6981 finish_cmd(c);
6982 }
6983}
6984
6985
6986
6987
6988
6989
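/*
 * Some controllers deliver a stray interrupt after a reset even though
 * interrupts are masked (a known firmware bug).  When booted with
 * reset_devices, ignore such interrupts rather than treating them as
 * real completions.
 */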
6990static int ignore_bogus_interrupt(struct ctlr_info *h)
6991{
6992 if (likely(!reset_devices))
6993 return 0;
6994
6995 if (likely(h->interrupts_enabled))
6996 return 0;
6997
6998 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6999 "(known firmware bug.) Ignoring.\n");
7000
7001 return 1;
7002}
7003
7004
7005
7006
7007
7008
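/*
 * The cookie passed to request_irq() is &h->q[x] with h->q[x] == x, so the
 * queue pointer can be converted back to the owning ctlr_info.
 */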
7009static struct ctlr_info *queue_to_hba(u8 *queue)
7010{
7011 return container_of((queue - *queue), struct ctlr_info, q[0]);
7012}
7013
7014static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7015{
7016 struct ctlr_info *h = queue_to_hba(queue);
7017 u8 q = *(u8 *) queue;
7018 u32 raw_tag;
7019
7020 if (ignore_bogus_interrupt(h))
7021 return IRQ_NONE;
7022
7023 if (interrupt_not_for_us(h))
7024 return IRQ_NONE;
7025 h->last_intr_timestamp = get_jiffies_64();
7026 while (interrupt_pending(h)) {
7027 raw_tag = get_next_completion(h, q);
7028 while (raw_tag != FIFO_EMPTY)
7029 raw_tag = next_command(h, q);
7030 }
7031 return IRQ_HANDLED;
7032}
7033
7034static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7035{
7036 struct ctlr_info *h = queue_to_hba(queue);
7037 u32 raw_tag;
7038 u8 q = *(u8 *) queue;
7039
7040 if (ignore_bogus_interrupt(h))
7041 return IRQ_NONE;
7042
7043 h->last_intr_timestamp = get_jiffies_64();
7044 raw_tag = get_next_completion(h, q);
7045 while (raw_tag != FIFO_EMPTY)
7046 raw_tag = next_command(h, q);
7047 return IRQ_HANDLED;
7048}
7049
7050static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7051{
7052 struct ctlr_info *h = queue_to_hba((u8 *) queue);
7053 u32 raw_tag;
7054 u8 q = *(u8 *) queue;
7055
7056 if (interrupt_not_for_us(h))
7057 return IRQ_NONE;
7058 h->last_intr_timestamp = get_jiffies_64();
7059 while (interrupt_pending(h)) {
7060 raw_tag = get_next_completion(h, q);
7061 while (raw_tag != FIFO_EMPTY) {
7062 process_indexed_cmd(h, raw_tag);
7063 raw_tag = next_command(h, q);
7064 }
7065 }
7066 return IRQ_HANDLED;
7067}
7068
7069static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7070{
7071 struct ctlr_info *h = queue_to_hba(queue);
7072 u32 raw_tag;
7073 u8 q = *(u8 *) queue;
7074
7075 h->last_intr_timestamp = get_jiffies_64();
7076 raw_tag = get_next_completion(h, q);
7077 while (raw_tag != FIFO_EMPTY) {
7078 process_indexed_cmd(h, raw_tag);
7079 raw_tag = next_command(h, q);
7080 }
7081 return IRQ_HANDLED;
7082}
7083
7084
7085
7086
7087
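/*
 * Send a message-type CDB (such as a no-op) to the controller through the
 * simple-mode request/reply ports and poll for the reply.  Only used in
 * early init and reset paths.
 */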
7088static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7089 unsigned char type)
7090{
7091 struct Command {
7092 struct CommandListHeader CommandHeader;
7093 struct RequestBlock Request;
7094 struct ErrDescriptor ErrorDescriptor;
7095 };
7096 struct Command *cmd;
7097 static const size_t cmd_sz = sizeof(*cmd) +
7098 sizeof(cmd->ErrorDescriptor);
7099 dma_addr_t paddr64;
7100 __le32 paddr32;
7101 u32 tag;
7102 void __iomem *vaddr;
7103 int i, err;
7104
7105 vaddr = pci_ioremap_bar(pdev, 0);
7106 if (vaddr == NULL)
7107 return -ENOMEM;
7108
7109
7110
7111
7112
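 /*
 * The command is posted by writing a 32-bit physical address to the
 * request port, so the buffer must live below 4 GiB; restrict the
 * coherent DMA mask accordingly.
 */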
7113 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7114 if (err) {
7115 iounmap(vaddr);
7116 return err;
7117 }
7118
7119 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7120 if (cmd == NULL) {
7121 iounmap(vaddr);
7122 return -ENOMEM;
7123 }
7124
7125
7126
7127
7128
7129 paddr32 = cpu_to_le32(paddr64);
7130
7131 cmd->CommandHeader.ReplyQueue = 0;
7132 cmd->CommandHeader.SGList = 0;
7133 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7134 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7135 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7136
7137 cmd->Request.CDBLen = 16;
7138 cmd->Request.type_attr_dir =
7139 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7140 cmd->Request.Timeout = 0;
7141 cmd->Request.CDB[0] = opcode;
7142 cmd->Request.CDB[1] = type;
7143 memset(&cmd->Request.CDB[2], 0, 14);
7144 cmd->ErrorDescriptor.Addr =
7145 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7146 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7147
7148 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7149
7150 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7151 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7152 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7153 break;
7154 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7155 }
7156
7157 iounmap(vaddr);
7158
7159
7160
7161
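 /*
 * On timeout the DMA buffer is deliberately leaked: the controller may
 * still complete the command later and write into it.
 */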
7162 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7163 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7164 opcode, type);
7165 return -ETIMEDOUT;
7166 }
7167
7168 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7169
7170 if (tag & HPSA_ERROR_BIT) {
7171 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7172 opcode, type);
7173 return -EIO;
7174 }
7175
7176 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7177 opcode, type);
7178 return 0;
7179}
7180
7181#define hpsa_noop(p) hpsa_message(p, 3, 0)
7182
7183static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7184 void __iomem *vaddr, u32 use_doorbell)
7185{
7186
7187 if (use_doorbell) {
7188
7189
7190
7191
7192 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7193 writel(use_doorbell, vaddr + SA5_DOORBELL);
7194
7195
7196
7197
7198
7199
7200 msleep(10000);
7201 } else {
7202
7203
7204
7205
7206
7207
7208
7209
7210
7211 int rc = 0;
7212
7213 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7214
7215
7216 rc = pci_set_power_state(pdev, PCI_D3hot);
7217 if (rc)
7218 return rc;
7219
7220 msleep(500);
7221
7222
7223 rc = pci_set_power_state(pdev, PCI_D0);
7224 if (rc)
7225 return rc;
7226
7227
7228
7229
7230
7231
7232 msleep(500);
7233 }
7234 return 0;
7235}
7236
7237static void init_driver_version(char *driver_version, int len)
7238{
7239 memset(driver_version, 0, len);
7240 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7241}
7242
7243static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7244{
7245 char *driver_version;
7246 int i, size = sizeof(cfgtable->driver_version);
7247
7248 driver_version = kmalloc(size, GFP_KERNEL);
7249 if (!driver_version)
7250 return -ENOMEM;
7251
7252 init_driver_version(driver_version, size);
7253 for (i = 0; i < size; i++)
7254 writeb(driver_version[i], &cfgtable->driver_version[i]);
7255 kfree(driver_version);
7256 return 0;
7257}
7258
7259static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7260 unsigned char *driver_ver)
7261{
7262 int i;
7263
7264 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7265 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7266}
7267
7268static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7269{
7270
7271 char *driver_ver, *old_driver_ver;
7272 int rc, size = sizeof(cfgtable->driver_version);
7273
7274 old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
7275 if (!old_driver_ver)
7276 return -ENOMEM;
7277 driver_ver = old_driver_ver + size;
7278
7279
7280
7281
7282 init_driver_version(old_driver_ver, size);
7283 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7284 rc = !memcmp(driver_ver, old_driver_ver, size);
7285 kfree(old_driver_ver);
7286 return rc;
7287}
7288
7289
7290
7291static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7292{
7293 u64 cfg_offset;
7294 u32 cfg_base_addr;
7295 u64 cfg_base_addr_index;
7296 void __iomem *vaddr;
7297 unsigned long paddr;
7298 u32 misc_fw_support;
7299 int rc;
7300 struct CfgTable __iomem *cfgtable;
7301 u32 use_doorbell;
7302 u16 command_register;
7303
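 /*
 * Reset the controller on behalf of a kdump kernel: save PCI state,
 * issue a doorbell (or, on older boards, PCI power-state) reset, then
 * restore state and wait for the board to become ready again.
 */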
7317 if (!ctlr_is_resettable(board_id)) {
7318 dev_warn(&pdev->dev, "Controller not resettable\n");
7319 return -ENODEV;
7320 }
7321
7322
7323 if (!ctlr_is_hard_resettable(board_id))
7324 return -ENOTSUPP;
7325
7326
7327 pci_read_config_word(pdev, 4, &command_register);
7328 pci_save_state(pdev);
7329
7330
7331 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7332 if (rc)
7333 return rc;
7334 vaddr = remap_pci_mem(paddr, 0x250);
7335 if (!vaddr)
7336 return -ENOMEM;
7337
7338
7339 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7340 &cfg_base_addr_index, &cfg_offset);
7341 if (rc)
7342 goto unmap_vaddr;
7343 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7344 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7345 if (!cfgtable) {
7346 rc = -ENOMEM;
7347 goto unmap_vaddr;
7348 }
7349 rc = write_driver_ver_to_cfgtable(cfgtable);
7350 if (rc)
7351 goto unmap_cfgtable;
7352
7353
7354
7355
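 /*
 * Check which reset method the firmware supports: doorbell reset 2 is
 * preferred; boards that only offer doorbell reset 1 are rejected here
 * (firmware update required), and boards with neither fall back to the
 * PCI power-state reset.
 */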
7356 misc_fw_support = readl(&cfgtable->misc_fw_support);
7357 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7358 if (use_doorbell) {
7359 use_doorbell = DOORBELL_CTLR_RESET2;
7360 } else {
7361 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7362 if (use_doorbell) {
7363 dev_warn(&pdev->dev,
7364 "Soft reset not supported. Firmware update is required.\n");
7365 rc = -ENOTSUPP;
7366 goto unmap_cfgtable;
7367 }
7368 }
7369
7370 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7371 if (rc)
7372 goto unmap_cfgtable;
7373
7374 pci_restore_state(pdev);
7375 pci_write_config_word(pdev, 4, command_register);
7376
7377
7378
7379 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7380
7381 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7382 if (rc) {
7383 dev_warn(&pdev->dev,
7384 "Failed waiting for board to become ready after hard reset\n");
7385 goto unmap_cfgtable;
7386 }
7387
7388 rc = controller_reset_failed(cfgtable);
7389 if (rc < 0)
7390 goto unmap_cfgtable;
7391 if (rc) {
7392 dev_warn(&pdev->dev, "Unable to successfully reset "
7393 "controller. Will try soft reset.\n");
7394 rc = -ENOTSUPP;
7395 } else {
7396 dev_info(&pdev->dev, "board ready after hard reset.\n");
7397 }
7398
7399unmap_cfgtable:
7400 iounmap(cfgtable);
7401
7402unmap_vaddr:
7403 iounmap(vaddr);
7404 return rc;
7405}
7406
7407
7408
7409
7410
7411
7412static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7413{
7414#ifdef HPSA_DEBUG
7415 int i;
7416 char temp_name[17];
7417
7418 dev_info(dev, "Controller Configuration information\n");
7419 dev_info(dev, "------------------------------------\n");
7420 for (i = 0; i < 4; i++)
7421 temp_name[i] = readb(&(tb->Signature[i]));
7422 temp_name[4] = '\0';
7423 dev_info(dev, " Signature = %s\n", temp_name);
7424 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7425 dev_info(dev, " Transport methods supported = 0x%x\n",
7426 readl(&(tb->TransportSupport)));
7427 dev_info(dev, " Transport methods active = 0x%x\n",
7428 readl(&(tb->TransportActive)));
7429 dev_info(dev, " Requested transport Method = 0x%x\n",
7430 readl(&(tb->HostWrite.TransportRequest)));
7431 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7432 readl(&(tb->HostWrite.CoalIntDelay)));
7433 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7434 readl(&(tb->HostWrite.CoalIntCount)));
7435 dev_info(dev, " Max outstanding commands = %d\n",
7436 readl(&(tb->CmdsOutMax)));
7437 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7438 for (i = 0; i < 16; i++)
7439 temp_name[i] = readb(&(tb->ServerName[i]));
7440 temp_name[16] = '\0';
7441 dev_info(dev, " Server Name = %s\n", temp_name);
7442 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7443 readl(&(tb->HeartBeat)));
7444#endif
7445}
7446
7447static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7448{
7449 int i, offset, mem_type, bar_type;
7450
7451 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
7452 return 0;
7453 offset = 0;
7454 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7455 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7456 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7457 offset += 4;
7458 else {
7459 mem_type = pci_resource_flags(pdev, i) &
7460 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7461 switch (mem_type) {
7462 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7463 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7464 offset += 4;
7465 break;
7466 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7467 offset += 8;
7468 break;
7469 default:
7470 dev_warn(&pdev->dev,
7471 "base address is invalid\n");
7472 return -1;
7473 }
7474 }
7475 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7476 return i + 1;
7477 }
7478 return -1;
7479}
7480
7481static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7482{
7483 pci_free_irq_vectors(h->pdev);
7484 h->msix_vectors = 0;
7485}
7486
7487static void hpsa_setup_reply_map(struct ctlr_info *h)
7488{
7489 const struct cpumask *mask;
7490 unsigned int queue, cpu;
7491
7492 for (queue = 0; queue < h->msix_vectors; queue++) {
7493 mask = pci_irq_get_affinity(h->pdev, queue);
7494 if (!mask)
7495 goto fallback;
7496
7497 for_each_cpu(cpu, mask)
7498 h->reply_map[cpu] = queue;
7499 }
7500 return;
7501
7502fallback:
7503 for_each_possible_cpu(cpu)
7504 h->reply_map[cpu] = 0;
7505}
7506
7507
7508
7509
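/*
 * Pick the interrupt mode: a few legacy board IDs are limited to a single
 * INTx vector; everything else tries multi-vector MSI-X with CPU affinity
 * and falls back to a single MSI or INTx vector.
 */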
7510static int hpsa_interrupt_mode(struct ctlr_info *h)
7511{
7512 unsigned int flags = PCI_IRQ_LEGACY;
7513 int ret;
7514
7515
7516 switch (h->board_id) {
7517 case 0x40700E11:
7518 case 0x40800E11:
7519 case 0x40820E11:
7520 case 0x40830E11:
7521 break;
7522 default:
7523 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7524 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7525 if (ret > 0) {
7526 h->msix_vectors = ret;
7527 return 0;
7528 }
7529
7530 flags |= PCI_IRQ_MSI;
7531 break;
7532 }
7533
7534 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7535 if (ret < 0)
7536 return ret;
7537 return 0;
7538}
7539
7540static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7541 bool *legacy_board)
7542{
7543 int i;
7544 u32 subsystem_vendor_id, subsystem_device_id;
7545
7546 subsystem_vendor_id = pdev->subsystem_vendor;
7547 subsystem_device_id = pdev->subsystem_device;
7548 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7549 subsystem_vendor_id;
7550
7551 if (legacy_board)
7552 *legacy_board = false;
7553 for (i = 0; i < ARRAY_SIZE(products); i++)
7554 if (*board_id == products[i].board_id) {
7555 if (products[i].access != &SA5A_access &&
7556 products[i].access != &SA5B_access)
7557 return i;
7558 dev_warn(&pdev->dev,
7559 "legacy board ID: 0x%08x\n",
7560 *board_id);
7561 if (legacy_board)
7562 *legacy_board = true;
7563 return i;
7564 }
7565
7566 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7567 if (legacy_board)
7568 *legacy_board = true;
7569 return ARRAY_SIZE(products) - 1;
7570}
7571
7572static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7573 unsigned long *memory_bar)
7574{
7575 int i;
7576
7577 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7578 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7579
7580 *memory_bar = pci_resource_start(pdev, i);
7581 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7582 *memory_bar);
7583 return 0;
7584 }
7585 dev_warn(&pdev->dev, "no memory BAR found\n");
7586 return -ENODEV;
7587}
7588
7589static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7590 int wait_for_ready)
7591{
7592 int i, iterations;
7593 u32 scratchpad;
7594 if (wait_for_ready)
7595 iterations = HPSA_BOARD_READY_ITERATIONS;
7596 else
7597 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7598
7599 for (i = 0; i < iterations; i++) {
7600 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7601 if (wait_for_ready) {
7602 if (scratchpad == HPSA_FIRMWARE_READY)
7603 return 0;
7604 } else {
7605 if (scratchpad != HPSA_FIRMWARE_READY)
7606 return 0;
7607 }
7608 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7609 }
7610 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7611 return -ENODEV;
7612}
7613
7614static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7615 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7616 u64 *cfg_offset)
7617{
7618 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7619 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7620 *cfg_base_addr &= (u32) 0x0000ffff;
7621 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7622 if (*cfg_base_addr_index == -1) {
7623 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7624 return -ENODEV;
7625 }
7626 return 0;
7627}
7628
7629static void hpsa_free_cfgtables(struct ctlr_info *h)
7630{
7631 if (h->transtable) {
7632 iounmap(h->transtable);
7633 h->transtable = NULL;
7634 }
7635 if (h->cfgtable) {
7636 iounmap(h->cfgtable);
7637 h->cfgtable = NULL;
7638 }
7639}
7640
7641
7642
7643
7644static int hpsa_find_cfgtables(struct ctlr_info *h)
7645{
7646 u64 cfg_offset;
7647 u32 cfg_base_addr;
7648 u64 cfg_base_addr_index;
7649 u32 trans_offset;
7650 int rc;
7651
7652 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7653 &cfg_base_addr_index, &cfg_offset);
7654 if (rc)
7655 return rc;
7656 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7657 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7658 if (!h->cfgtable) {
7659 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7660 return -ENOMEM;
7661 }
7662 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7663 if (rc)
7664 return rc;
7665
7666 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7667 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7668 cfg_base_addr_index)+cfg_offset+trans_offset,
7669 sizeof(*h->transtable));
7670 if (!h->transtable) {
7671 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7672 hpsa_free_cfgtables(h);
7673 return -ENOMEM;
7674 }
7675 return 0;
7676}
7677
7678static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7679{
7680#define MIN_MAX_COMMANDS 16
7681 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7682
7683 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7684
7685
7686 if (reset_devices && h->max_commands > 32)
7687 h->max_commands = 32;
7688
7689 if (h->max_commands < MIN_MAX_COMMANDS) {
7690 dev_warn(&h->pdev->dev,
7691 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7692 h->max_commands,
7693 MIN_MAX_COMMANDS);
7694 h->max_commands = MIN_MAX_COMMANDS;
7695 }
7696}
7697
7698
7699
7700
7701
7702static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7703{
7704 return h->maxsgentries > 512;
7705}
7706
7707
7708
7709
7710
7711static void hpsa_find_board_params(struct ctlr_info *h)
7712{
7713 hpsa_get_max_perf_mode_cmds(h);
7714 h->nr_cmds = h->max_commands;
7715 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7716 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7717 if (hpsa_supports_chained_sg_blocks(h)) {
7718
7719 h->max_cmd_sg_entries = 32;
7720 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7721 h->maxsgentries--;
7722 } else {
7723
7724
7725
7726
7727
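 /*
 * Original Smart Arrays support at most 31 scatter-gather entries
 * embedded in the command and no SG chaining.
 */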
7728 h->max_cmd_sg_entries = 31;
7729 h->maxsgentries = 31;
7730 h->chainsize = 0;
7731 }
7732
7733
7734 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7735 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7736 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7737 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7738 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7739 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7740 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7741}
7742
7743static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7744{
7745 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7746 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7747 return false;
7748 }
7749 return true;
7750}
7751
7752static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7753{
7754 u32 driver_support;
7755
7756 driver_support = readl(&(h->cfgtable->driver_support));
7757
7758#ifdef CONFIG_X86
7759 driver_support |= ENABLE_SCSI_PREFETCH;
7760#endif
7761 driver_support |= ENABLE_UNIT_ATTN;
7762 writel(driver_support, &(h->cfgtable->driver_support));
7763}
7764
7765
7766
7767
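/*
 * P600 quirk: adjust the DMA prefetch setting in the I2O config register
 * to work around a prefetch-related hardware problem on that controller.
 */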
7768static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7769{
7770 u32 dma_prefetch;
7771
7772 if (h->board_id != 0x3225103C)
7773 return;
7774 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7775 dma_prefetch |= 0x8000;
7776 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7777}
7778
7779static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7780{
7781 int i;
7782 u32 doorbell_value;
7783 unsigned long flags;
7784
7785 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7786 spin_lock_irqsave(&h->lock, flags);
7787 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7788 spin_unlock_irqrestore(&h->lock, flags);
7789 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7790 goto done;
7791
7792 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7793 }
7794 return -ENODEV;
7795done:
7796 return 0;
7797}
7798
7799static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7800{
7801 int i;
7802 u32 doorbell_value;
7803 unsigned long flags;
7804
7805
7806
7807
7808
7809 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7810 if (h->remove_in_progress)
7811 goto done;
7812 spin_lock_irqsave(&h->lock, flags);
7813 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7814 spin_unlock_irqrestore(&h->lock, flags);
7815 if (!(doorbell_value & CFGTBL_ChangeReq))
7816 goto done;
7817
7818 msleep(MODE_CHANGE_WAIT_INTERVAL);
7819 }
7820 return -ENODEV;
7821done:
7822 return 0;
7823}
7824
7825
7826static int hpsa_enter_simple_mode(struct ctlr_info *h)
7827{
7828 u32 trans_support;
7829
7830 trans_support = readl(&(h->cfgtable->TransportSupport));
7831 if (!(trans_support & SIMPLE_MODE))
7832 return -ENOTSUPP;
7833
7834 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7835
7836
7837 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7838 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7839 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7840 if (hpsa_wait_for_mode_change_ack(h))
7841 goto error;
7842 print_cfg_table(&h->pdev->dev, h->cfgtable);
7843 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7844 goto error;
7845 h->transMethod = CFGTBL_Trans_Simple;
7846 return 0;
7847error:
7848 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7849 return -ENODEV;
7850}
7851
7852
7853static void hpsa_free_pci_init(struct ctlr_info *h)
7854{
7855 hpsa_free_cfgtables(h);
7856 iounmap(h->vaddr);
7857 h->vaddr = NULL;
7858 hpsa_disable_interrupt_mode(h);
7859
7860
7861
7862
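 /* Disable the PCI device before releasing its regions. */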
7863 pci_disable_device(h->pdev);
7864 pci_release_regions(h->pdev);
7865}
7866
7867
7868static int hpsa_pci_init(struct ctlr_info *h)
7869{
7870 int prod_index, err;
7871 bool legacy_board;
7872
7873 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7874 if (prod_index < 0)
7875 return prod_index;
7876 h->product_name = products[prod_index].product_name;
7877 h->access = *(products[prod_index].access);
7878 h->legacy_board = legacy_board;
7879 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7880 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7881
7882 err = pci_enable_device(h->pdev);
7883 if (err) {
7884 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7885 pci_disable_device(h->pdev);
7886 return err;
7887 }
7888
7889 err = pci_request_regions(h->pdev, HPSA);
7890 if (err) {
7891 dev_err(&h->pdev->dev,
7892 "failed to obtain PCI resources\n");
7893 pci_disable_device(h->pdev);
7894 return err;
7895 }
7896
7897 pci_set_master(h->pdev);
7898
7899 err = hpsa_interrupt_mode(h);
7900 if (err)
7901 goto clean1;
7902
7903
7904 hpsa_setup_reply_map(h);
7905
7906 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7907 if (err)
7908 goto clean2;
7909 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7910 if (!h->vaddr) {
7911 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7912 err = -ENOMEM;
7913 goto clean2;
7914 }
7915 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7916 if (err)
7917 goto clean3;
7918 err = hpsa_find_cfgtables(h);
7919 if (err)
7920 goto clean3;
7921 hpsa_find_board_params(h);
7922
7923 if (!hpsa_CISS_signature_present(h)) {
7924 err = -ENODEV;
7925 goto clean4;
7926 }
7927 hpsa_set_driver_support_bits(h);
7928 hpsa_p600_dma_prefetch_quirk(h);
7929 err = hpsa_enter_simple_mode(h);
7930 if (err)
7931 goto clean4;
7932 return 0;
7933
7934clean4:
7935 hpsa_free_cfgtables(h);
7936clean3:
7937 iounmap(h->vaddr);
7938 h->vaddr = NULL;
7939clean2:
7940 hpsa_disable_interrupt_mode(h);
7941clean1:
7942
7943
7944
7945
7946 pci_disable_device(h->pdev);
7947 pci_release_regions(h->pdev);
7948 return err;
7949}
7950
7951static void hpsa_hba_inquiry(struct ctlr_info *h)
7952{
7953 int rc;
7954
7955#define HBA_INQUIRY_BYTE_COUNT 64
7956 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7957 if (!h->hba_inquiry_data)
7958 return;
7959 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7960 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7961 if (rc != 0) {
7962 kfree(h->hba_inquiry_data);
7963 h->hba_inquiry_data = NULL;
7964 }
7965}
7966
7967static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7968{
7969 int rc, i;
7970 void __iomem *vaddr;
7971
7972 if (!reset_devices)
7973 return 0;
7974
7975
7976
7977
7978
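 /*
 * A kdump kernel does not know what state the PCI interface was left in,
 * so cycle the device through enable/disable, pause, then enable it again
 * before touching it.
 */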
7979 rc = pci_enable_device(pdev);
7980 if (rc) {
7981 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7982 return -ENODEV;
7983 }
7984 pci_disable_device(pdev);
7985 msleep(260);
7986 rc = pci_enable_device(pdev);
7987 if (rc) {
7988 dev_warn(&pdev->dev, "failed to enable device.\n");
7989 return -ENODEV;
7990 }
7991
7992 pci_set_master(pdev);
7993
7994 vaddr = pci_ioremap_bar(pdev, 0);
7995 if (vaddr == NULL) {
7996 rc = -ENOMEM;
7997 goto out_disable;
7998 }
7999 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8000 iounmap(vaddr);
8001
8002
8003 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8004
8005
8006
8007
8008
8009
8010 if (rc)
8011 goto out_disable;
8012
8013
8014 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8015 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8016 if (hpsa_noop(pdev) == 0)
8017 break;
8018 else
8019 dev_warn(&pdev->dev, "no-op failed%s\n",
8020 (i < 11 ? "; re-trying" : ""));
8021 }
8022
8023out_disable:
8024
8025 pci_disable_device(pdev);
8026 return rc;
8027}
8028
8029static void hpsa_free_cmd_pool(struct ctlr_info *h)
8030{
8031 kfree(h->cmd_pool_bits);
8032 h->cmd_pool_bits = NULL;
8033 if (h->cmd_pool) {
8034 dma_free_coherent(&h->pdev->dev,
8035 h->nr_cmds * sizeof(struct CommandList),
8036 h->cmd_pool,
8037 h->cmd_pool_dhandle);
8038 h->cmd_pool = NULL;
8039 h->cmd_pool_dhandle = 0;
8040 }
8041 if (h->errinfo_pool) {
8042 dma_free_coherent(&h->pdev->dev,
8043 h->nr_cmds * sizeof(struct ErrorInfo),
8044 h->errinfo_pool,
8045 h->errinfo_pool_dhandle);
8046 h->errinfo_pool = NULL;
8047 h->errinfo_pool_dhandle = 0;
8048 }
8049}
8050
8051static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8052{
8053 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
8054 sizeof(unsigned long),
8055 GFP_KERNEL);
8056 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
8057 h->nr_cmds * sizeof(*h->cmd_pool),
8058 &h->cmd_pool_dhandle, GFP_KERNEL);
8059 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
8060 h->nr_cmds * sizeof(*h->errinfo_pool),
8061 &h->errinfo_pool_dhandle, GFP_KERNEL);
8062 if ((h->cmd_pool_bits == NULL)
8063 || (h->cmd_pool == NULL)
8064 || (h->errinfo_pool == NULL)) {
8065 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
8066 goto clean_up;
8067 }
8068 hpsa_preinitialize_commands(h);
8069 return 0;
8070clean_up:
8071 hpsa_free_cmd_pool(h);
8072 return -ENOMEM;
8073}
8074
8075
8076static void hpsa_free_irqs(struct ctlr_info *h)
8077{
8078 int i;
8079 int irq_vector = 0;
8080
8081 if (hpsa_simple_mode)
8082 irq_vector = h->intr_mode;
8083
8084 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8085
8086 free_irq(pci_irq_vector(h->pdev, irq_vector),
8087 &h->q[h->intr_mode]);
8088 h->q[h->intr_mode] = 0;
8089 return;
8090 }
8091
8092 for (i = 0; i < h->msix_vectors; i++) {
8093 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8094 h->q[i] = 0;
8095 }
8096 for (; i < MAX_REPLY_QUEUES; i++)
8097 h->q[i] = 0;
8098}
8099
8100
8101static int hpsa_request_irqs(struct ctlr_info *h,
8102 irqreturn_t (*msixhandler)(int, void *),
8103 irqreturn_t (*intxhandler)(int, void *))
8104{
8105 int rc, i;
8106 int irq_vector = 0;
8107
8108 if (hpsa_simple_mode)
8109 irq_vector = h->intr_mode;
8110
8111
8112
8113
8114
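 /*
 * Initialize h->q[x] = x so each interrupt handler knows which reply
 * queue it services.
 */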
8115 for (i = 0; i < MAX_REPLY_QUEUES; i++)
8116 h->q[i] = (u8) i;
8117
8118 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8119
8120 for (i = 0; i < h->msix_vectors; i++) {
8121 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8122 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8123 0, h->intrname[i],
8124 &h->q[i]);
8125 if (rc) {
8126 int j;
8127
8128 dev_err(&h->pdev->dev,
8129 "failed to get irq %d for %s\n",
8130 pci_irq_vector(h->pdev, i), h->devname);
8131 for (j = 0; j < i; j++) {
8132 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8133 h->q[j] = 0;
8134 }
8135 for (; j < MAX_REPLY_QUEUES; j++)
8136 h->q[j] = 0;
8137 return rc;
8138 }
8139 }
8140 } else {
8141
8142 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8143 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8144 h->msix_vectors ? "x" : "");
8145 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8146 msixhandler, 0,
8147 h->intrname[0],
8148 &h->q[h->intr_mode]);
8149 } else {
8150 sprintf(h->intrname[h->intr_mode],
8151 "%s-intx", h->devname);
8152 rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
8153 intxhandler, IRQF_SHARED,
8154 h->intrname[0],
8155 &h->q[h->intr_mode]);
8156 }
8157 }
8158 if (rc) {
8159 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8160 pci_irq_vector(h->pdev, irq_vector), h->devname);
8161 hpsa_free_irqs(h);
8162 return -ENODEV;
8163 }
8164 return 0;
8165}
8166
8167static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8168{
8169 int rc;
8170 hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
8171
8172 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8173 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8174 if (rc) {
8175 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8176 return rc;
8177 }
8178
8179 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8180 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8181 if (rc) {
8182 dev_warn(&h->pdev->dev, "Board failed to become ready "
8183 "after soft reset.\n");
8184 return rc;
8185 }
8186
8187 return 0;
8188}
8189
8190static void hpsa_free_reply_queues(struct ctlr_info *h)
8191{
8192 int i;
8193
8194 for (i = 0; i < h->nreply_queues; i++) {
8195 if (!h->reply_queue[i].head)
8196 continue;
8197 dma_free_coherent(&h->pdev->dev,
8198 h->reply_queue_size,
8199 h->reply_queue[i].head,
8200 h->reply_queue[i].busaddr);
8201 h->reply_queue[i].head = NULL;
8202 h->reply_queue[i].busaddr = 0;
8203 }
8204 h->reply_queue_size = 0;
8205}
8206
8207static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8208{
8209 hpsa_free_performant_mode(h);
8210 hpsa_free_sg_chain_blocks(h);
8211 hpsa_free_cmd_pool(h);
8212 hpsa_free_irqs(h);
8213 scsi_host_put(h->scsi_host);
8214 h->scsi_host = NULL;
8215 hpsa_free_pci_init(h);
8216 free_percpu(h->lockup_detected);
8217 h->lockup_detected = NULL;
8218 if (h->resubmit_wq) {
8219 destroy_workqueue(h->resubmit_wq);
8220 h->resubmit_wq = NULL;
8221 }
8222 if (h->rescan_ctlr_wq) {
8223 destroy_workqueue(h->rescan_ctlr_wq);
8224 h->rescan_ctlr_wq = NULL;
8225 }
8226 if (h->monitor_ctlr_wq) {
8227 destroy_workqueue(h->monitor_ctlr_wq);
8228 h->monitor_ctlr_wq = NULL;
8229 }
8230
8231 kfree(h);
8232}
8233
8234
8235static void fail_all_outstanding_cmds(struct ctlr_info *h)
8236{
8237 int i, refcount;
8238 struct CommandList *c;
8239 int failcount = 0;
8240
8241 flush_workqueue(h->resubmit_wq);
8242 for (i = 0; i < h->nr_cmds; i++) {
8243 c = h->cmd_pool + i;
8244 refcount = atomic_inc_return(&c->refcount);
8245 if (refcount > 1) {
8246 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8247 finish_cmd(c);
8248 atomic_dec(&h->commands_outstanding);
8249 failcount++;
8250 }
8251 cmd_free(h, c);
8252 }
8253 dev_warn(&h->pdev->dev,
8254 "failed %d commands in fail_all\n", failcount);
8255}
8256
8257static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8258{
8259 int cpu;
8260
8261 for_each_online_cpu(cpu) {
8262 u32 *lockup_detected;
8263 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8264 *lockup_detected = value;
8265 }
8266 wmb();
8267}
8268
8269static void controller_lockup_detected(struct ctlr_info *h)
8270{
8271 unsigned long flags;
8272 u32 lockup_detected;
8273
8274 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8275 spin_lock_irqsave(&h->lock, flags);
8276 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8277 if (!lockup_detected) {
8278
8279 dev_warn(&h->pdev->dev,
8280 "lockup detected after %d but scratchpad register is zero\n",
8281 h->heartbeat_sample_interval / HZ);
8282 lockup_detected = 0xffffffff;
8283 }
8284 set_lockup_detected_for_all_cpus(h, lockup_detected);
8285 spin_unlock_irqrestore(&h->lock, flags);
8286 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8287 lockup_detected, h->heartbeat_sample_interval / HZ);
8288 if (lockup_detected == 0xffff0000) {
8289 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8290 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8291 }
8292 pci_disable_device(h->pdev);
8293 fail_all_outstanding_cmds(h);
8294}
8295
8296static int detect_controller_lockup(struct ctlr_info *h)
8297{
8298 u64 now;
8299 u32 heartbeat;
8300 unsigned long flags;
8301
8302 now = get_jiffies_64();
8303
8304 if (time_after64(h->last_intr_timestamp +
8305 (h->heartbeat_sample_interval), now))
8306 return false;
8307
8308
8309
8310
8311
8312
8313 if (time_after64(h->last_heartbeat_timestamp +
8314 (h->heartbeat_sample_interval), now))
8315 return false;
8316
8317
8318 spin_lock_irqsave(&h->lock, flags);
8319 heartbeat = readl(&h->cfgtable->HeartBeat);
8320 spin_unlock_irqrestore(&h->lock, flags);
8321 if (h->last_heartbeat == heartbeat) {
8322 controller_lockup_detected(h);
8323 return true;
8324 }
8325
8326
8327 h->last_heartbeat = heartbeat;
8328 h->last_heartbeat_timestamp = now;
8329 return false;
8330}
8331
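/*
 * Re-evaluate ioaccel (HP SSD Smart Path) status for every logical volume
 * by reading the ioaccel-status VPD page, and turn ioaccel off for any
 * volume whose offload is no longer enabled.
 */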
8341static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8342{
8343 int rc;
8344 int i;
8345 u8 ioaccel_status;
8346 unsigned char *buf;
8347 struct hpsa_scsi_dev_t *device;
8348
8349 if (!h)
8350 return;
8351
8352 buf = kmalloc(64, GFP_KERNEL);
8353 if (!buf)
8354 return;
8355
8356
8357
8358
8359 for (i = 0; i < h->ndevices; i++) {
8360 int offload_to_be_enabled = 0;
8361 int offload_config = 0;
8362
8363 device = h->dev[i];
8364
8365 if (!device)
8366 continue;
8367 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8368 HPSA_VPD_LV_IOACCEL_STATUS))
8369 continue;
8370
8371 memset(buf, 0, 64);
8372
8373 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8374 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8375 buf, 64);
8376 if (rc != 0)
8377 continue;
8378
8379 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8380
8381
8382
8383
8384 offload_config =
8385 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8386
8387
8388
8389
8390 if (offload_config)
8391 offload_to_be_enabled =
8392 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8393
8394
8395
8396
8397
8398
8399 if (offload_to_be_enabled)
8400 continue;
8401
8402
8403
8404
8405
8406
8407
8408 hpsa_turn_off_ioaccel_for_device(device);
8409 }
8410
8411 kfree(buf);
8412}
8413
8414static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8415{
8416 char *event_type;
8417
8418 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8419 return;
8420
8421
8422 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8423 | CFGTBL_Trans_io_accel2)) &&
8424 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8425 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8426
8427 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8428 event_type = "state change";
8429 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8430 event_type = "configuration change";
8431
8432 scsi_block_requests(h->scsi_host);
8433 hpsa_set_ioaccel_status(h);
8434 hpsa_drain_accel_commands(h);
8435
8436 dev_warn(&h->pdev->dev,
8437 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8438 h->events, event_type);
8439 writel(h->events, &(h->cfgtable->clear_event_notify));
8440
8441 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8442
8443 hpsa_wait_for_clear_event_notify_ack(h);
8444 scsi_unblock_requests(h->scsi_host);
8445 } else {
8446
8447 writel(h->events, &(h->cfgtable->clear_event_notify));
8448 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8449 hpsa_wait_for_clear_event_notify_ack(h);
8450 }
8451 return;
8452}
8453
8454
8455
8456
8457
8458
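/*
 * Decide whether the controller needs to be rescanned for devices: either
 * the driver explicitly requested a rescan or the controller's event
 * register reports configuration changes.
 */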
8459static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8460{
8461 if (h->drv_req_rescan) {
8462 h->drv_req_rescan = 0;
8463 return 1;
8464 }
8465
8466 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8467 return 0;
8468
8469 h->events = readl(&(h->cfgtable->event_notify));
8470 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8471}
8472
8473
8474
8475
8476static int hpsa_offline_devices_ready(struct ctlr_info *h)
8477{
8478 unsigned long flags;
8479 struct offline_device_entry *d;
8480 struct list_head *this, *tmp;
8481
8482 spin_lock_irqsave(&h->offline_device_lock, flags);
8483 list_for_each_safe(this, tmp, &h->offline_device_list) {
8484 d = list_entry(this, struct offline_device_entry,
8485 offline_list);
8486 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8487 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8488 spin_lock_irqsave(&h->offline_device_lock, flags);
8489 list_del(&d->offline_list);
8490 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8491 return 1;
8492 }
8493 spin_lock_irqsave(&h->offline_device_lock, flags);
8494 }
8495 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8496 return 0;
8497}
8498
8499static int hpsa_luns_changed(struct ctlr_info *h)
8500{
8501 int rc = 1;
8502 struct ReportLUNdata *logdev = NULL;
8503
8504
8505
8506
8507
8508 if (!h->lastlogicals)
8509 return rc;
8510
8511 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8512 if (!logdev)
8513 return rc;
8514
8515 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8516 dev_warn(&h->pdev->dev,
8517 "report luns failed, can't track lun changes.\n");
8518 goto out;
8519 }
8520 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8521 dev_info(&h->pdev->dev,
8522 "Lun changes detected.\n");
8523 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8524 goto out;
8525 } else
8526 rc = 0;
8527out:
8528 kfree(logdev);
8529 return rc;
8530}
8531
8532static void hpsa_perform_rescan(struct ctlr_info *h)
8533{
8534 struct Scsi_Host *sh = NULL;
8535 unsigned long flags;
8536
8537
8538
8539
8540 spin_lock_irqsave(&h->reset_lock, flags);
8541 if (h->reset_in_progress) {
8542 h->drv_req_rescan = 1;
8543 spin_unlock_irqrestore(&h->reset_lock, flags);
8544 return;
8545 }
8546 spin_unlock_irqrestore(&h->reset_lock, flags);
8547
8548 sh = scsi_host_get(h->scsi_host);
8549 if (sh != NULL) {
8550 hpsa_scan_start(sh);
8551 scsi_host_put(sh);
8552 h->drv_req_rescan = 0;
8553 }
8554}
8555
8556
8557
8558
8559static void hpsa_event_monitor_worker(struct work_struct *work)
8560{
8561 struct ctlr_info *h = container_of(to_delayed_work(work),
8562 struct ctlr_info, event_monitor_work);
8563 unsigned long flags;
8564
8565 spin_lock_irqsave(&h->lock, flags);
8566 if (h->remove_in_progress) {
8567 spin_unlock_irqrestore(&h->lock, flags);
8568 return;
8569 }
8570 spin_unlock_irqrestore(&h->lock, flags);
8571
8572 if (hpsa_ctlr_needs_rescan(h)) {
8573 hpsa_ack_ctlr_events(h);
8574 hpsa_perform_rescan(h);
8575 }
8576
8577 spin_lock_irqsave(&h->lock, flags);
8578 if (!h->remove_in_progress)
8579 queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
8580 HPSA_EVENT_MONITOR_INTERVAL);
8581 spin_unlock_irqrestore(&h->lock, flags);
8582}
8583
8584static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8585{
8586 unsigned long flags;
8587 struct ctlr_info *h = container_of(to_delayed_work(work),
8588 struct ctlr_info, rescan_ctlr_work);
8589
8590 spin_lock_irqsave(&h->lock, flags);
8591 if (h->remove_in_progress) {
8592 spin_unlock_irqrestore(&h->lock, flags);
8593 return;
8594 }
8595 spin_unlock_irqrestore(&h->lock, flags);
8596
8597 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8598 hpsa_perform_rescan(h);
8599 } else if (h->discovery_polling) {
8600 if (hpsa_luns_changed(h)) {
8601 dev_info(&h->pdev->dev,
8602 "driver discovery polling rescan.\n");
8603 hpsa_perform_rescan(h);
8604 }
8605 }
8606 spin_lock_irqsave(&h->lock, flags);
8607 if (!h->remove_in_progress)
8608 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8609 h->heartbeat_sample_interval);
8610 spin_unlock_irqrestore(&h->lock, flags);
8611}
8612
8613static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8614{
8615 unsigned long flags;
8616 struct ctlr_info *h = container_of(to_delayed_work(work),
8617 struct ctlr_info, monitor_ctlr_work);
8618
8619 detect_controller_lockup(h);
8620 if (lockup_detected(h))
8621 return;
8622
8623 spin_lock_irqsave(&h->lock, flags);
8624 if (!h->remove_in_progress)
8625 queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
8626 h->heartbeat_sample_interval);
8627 spin_unlock_irqrestore(&h->lock, flags);
8628}
8629
8630static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8631 char *name)
8632{
8633 struct workqueue_struct *wq = NULL;
8634
8635 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8636 if (!wq)
8637 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8638
8639 return wq;
8640}
8641
8642static void hpda_free_ctlr_info(struct ctlr_info *h)
8643{
8644 kfree(h->reply_map);
8645 kfree(h);
8646}
8647
8648static struct ctlr_info *hpda_alloc_ctlr_info(void)
8649{
8650 struct ctlr_info *h;
8651
8652 h = kzalloc(sizeof(*h), GFP_KERNEL);
8653 if (!h)
8654 return NULL;
8655
8656 h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
8657 if (!h->reply_map) {
8658 kfree(h);
8659 return NULL;
8660 }
8661 return h;
8662}
8663
8664static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8665{
8666 int rc;
8667 struct ctlr_info *h;
8668 int try_soft_reset = 0;
8669 unsigned long flags;
8670 u32 board_id;
8671
8672 if (number_of_controllers == 0)
8673 printk(KERN_INFO DRIVER_NAME "\n");
8674
8675 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8676 if (rc < 0) {
8677 dev_warn(&pdev->dev, "Board ID not found\n");
8678 return rc;
8679 }
8680
8681 rc = hpsa_init_reset_devices(pdev, board_id);
8682 if (rc) {
8683 if (rc != -ENOTSUPP)
8684 return rc;
8685
8686
8687
8688
8689
8690 try_soft_reset = 1;
8691 rc = 0;
8692 }
8693
8694reinit_after_soft_reset:
8695
8696
8697
8698
8699
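 /*
 * struct CommandList must be an exact multiple of COMMANDLIST_ALIGNMENT
 * so that every command in the DMA pool stays properly aligned.
 */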
8700 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8701 h = hpda_alloc_ctlr_info();
8702 if (!h) {
8703 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8704 return -ENOMEM;
8705 }
8706
8707 h->pdev = pdev;
8708
8709 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8710 INIT_LIST_HEAD(&h->offline_device_list);
8711 spin_lock_init(&h->lock);
8712 spin_lock_init(&h->offline_device_lock);
8713 spin_lock_init(&h->scan_lock);
8714 spin_lock_init(&h->reset_lock);
8715 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8716
8717	/* Allocate and clear per-cpu variable lockup_detected */
8718 h->lockup_detected = alloc_percpu(u32);
8719 if (!h->lockup_detected) {
8720 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8721 rc = -ENOMEM;
8722 goto clean1;
8723 }
8724 set_lockup_detected_for_all_cpus(h, 0);
8725
8726 rc = hpsa_pci_init(h);
8727 if (rc)
8728 goto clean2;
8729
8730
8731	/* hpsa_scsi_host_alloc relies on settings made by hpsa_pci_init */
8732 rc = hpsa_scsi_host_alloc(h);
8733 if (rc)
8734 goto clean2_5;
8735
8736 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8737 h->ctlr = number_of_controllers;
8738 number_of_controllers++;
8739
8740	/* configure PCI DMA mask */
8741 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8742 if (rc != 0) {
8743 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8744 if (rc != 0) {
8745 dev_err(&pdev->dev, "no suitable DMA available\n");
8746 goto clean3;
8747 }
8748 }
8749
8750	/* make sure the board interrupts are off */
8751 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8752
8753 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8754 if (rc)
8755 goto clean3;
8756 rc = hpsa_alloc_cmd_pool(h);
8757 if (rc)
8758 goto clean4;
8759 rc = hpsa_alloc_sg_chain_blocks(h);
8760 if (rc)
8761 goto clean5;
8762 init_waitqueue_head(&h->scan_wait_queue);
8763 init_waitqueue_head(&h->event_sync_wait_queue);
8764 mutex_init(&h->reset_mutex);
8765 h->scan_finished = 1;
8766 h->scan_waiting = 0;
8767
8768 pci_set_drvdata(pdev, h);
8769 h->ndevices = 0;
8770
8771 spin_lock_init(&h->devlock);
8772 rc = hpsa_put_ctlr_into_performant_mode(h);
8773 if (rc)
8774 goto clean6;
8775
8776
8777 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8778 if (!h->rescan_ctlr_wq) {
8779 rc = -ENOMEM;
8780 goto clean7;
8781 }
8782
8783 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8784 if (!h->resubmit_wq) {
8785 rc = -ENOMEM;
8786 goto clean7;
8787 }
8788
8789 h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
8790 if (!h->monitor_ctlr_wq) {
8791 rc = -ENOMEM;
8792 goto clean7;
8793 }
8794
8795
8796	/*
8797	 * At this point, the controller is ready to take commands.  If a
8798	 * hard reset was attempted and didn't work, try a soft reset now.
8799	 */
8800 if (try_soft_reset) {
8801		/*
8802		 * We may or may not get a completion from the soft reset
8803		 * command, and any values read from the FIFO afterwards may or
8804		 * may not be valid.  So swap in interrupt handlers that simply
8805		 * discard completions, wait out the reset, and throw away
8806		 * whatever arrives in the meantime.
8807		 */
8808
8809 spin_lock_irqsave(&h->lock, flags);
8810 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8811 spin_unlock_irqrestore(&h->lock, flags);
8812 hpsa_free_irqs(h);
8813 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8814 hpsa_intx_discard_completions);
8815 if (rc) {
8816 dev_warn(&h->pdev->dev,
8817 "Failed to request_irq after soft reset.\n");
8818			/*
8819			 * IRQs are already freed, so undo the clean7..clean5
8820			 * allocations by hand and then jump to clean3.
8821			 */
8822 hpsa_free_performant_mode(h);
8823 hpsa_free_sg_chain_blocks(h);
8824 hpsa_free_cmd_pool(h);
8825			/*
8826			 * Skip the soft reset itself; without working
8827			 * interrupts we would not get any further anyway.
8828			 */
8829 goto clean3;
8830 }
8831
8832 rc = hpsa_kdump_soft_reset(h);
8833 if (rc)
8834			/* Neither hard nor soft reset worked, we're hosed. */
8835 goto clean7;
8836
8837 dev_info(&h->pdev->dev, "Board READY.\n");
8838 dev_info(&h->pdev->dev,
8839 "Waiting for stale completions to drain.\n");
8840 h->access.set_intr_mask(h, HPSA_INTR_ON);
8841 msleep(10000);
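		/* Mask interrupts again now that any stale completions
		 * have been discarded. */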
8842 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8843
8844 rc = controller_reset_failed(h->cfgtable);
8845 if (rc)
8846 dev_info(&h->pdev->dev,
8847 "Soft reset appears to have failed.\n");
8848
8849		/*
8850		 * The controller has just been reset, so forget everything
8851		 * we've set up and redo the initialization from scratch.
8852		 */
8853 hpsa_undo_allocations_after_kdump_soft_reset(h);
8854 try_soft_reset = 0;
8855 if (rc)
8856			/* don't goto clean, we already unallocated */
8857 return -ENODEV;
8858
8859 goto reinit_after_soft_reset;
8860 }
8861
8862	/* Enable Accelerated IO path at driver layer */
8863 h->acciopath_status = 1;
8864
8865 h->discovery_polling = 0;
8866
8867
8868	/* Turn the interrupts on so we can service requests */
8869 h->access.set_intr_mask(h, HPSA_INTR_ON);
8870
8871 hpsa_hba_inquiry(h);
8872
8873 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8874 if (!h->lastlogicals)
8875 dev_info(&h->pdev->dev,
8876 "Can't track change to report lun data\n");
8877
8878	/* hook into SCSI subsystem */
8879 rc = hpsa_scsi_add_host(h);
8880 if (rc)
8881 goto clean8;
8882
8883	/* Monitor the controller for firmware lockups */
8884 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8885 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8886 schedule_delayed_work(&h->monitor_ctlr_work,
8887 h->heartbeat_sample_interval);
8888 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8889 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8890 h->heartbeat_sample_interval);
8891 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8892 schedule_delayed_work(&h->event_monitor_work,
8893 HPSA_EVENT_MONITOR_INTERVAL);
8894 return 0;
8895
8896clean8:
8897 kfree(h->lastlogicals);
8898clean7:
8899 hpsa_free_performant_mode(h);
8900 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8901clean6:
8902 hpsa_free_sg_chain_blocks(h);
8903clean5:
8904 hpsa_free_cmd_pool(h);
8905clean4:
8906 hpsa_free_irqs(h);
8907clean3:
8908 scsi_host_put(h->scsi_host);
8909 h->scsi_host = NULL;
8910clean2_5:
8911 hpsa_free_pci_init(h);
8912clean2:
8913 if (h->lockup_detected) {
8914 free_percpu(h->lockup_detected);
8915 h->lockup_detected = NULL;
8916 }
8917clean1:
8918 if (h->resubmit_wq) {
8919 destroy_workqueue(h->resubmit_wq);
8920 h->resubmit_wq = NULL;
8921 }
8922 if (h->rescan_ctlr_wq) {
8923 destroy_workqueue(h->rescan_ctlr_wq);
8924 h->rescan_ctlr_wq = NULL;
8925 }
8926 if (h->monitor_ctlr_wq) {
8927 destroy_workqueue(h->monitor_ctlr_wq);
8928 h->monitor_ctlr_wq = NULL;
8929 }
8930	hpda_free_ctlr_info(h);	/* also frees h->reply_map */
8931 return rc;
8932}
8933
8934static void hpsa_flush_cache(struct ctlr_info *h)
8935{
8936 char *flush_buf;
8937 struct CommandList *c;
8938 int rc;
8939
8940 if (unlikely(lockup_detected(h)))
8941 return;
8942 flush_buf = kzalloc(4, GFP_KERNEL);
8943 if (!flush_buf)
8944 return;
8945
8946 c = cmd_alloc(h);
8947
8948	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8949		RAID_CTLR_LUNID, TYPE_CMD))
8950		goto out;
8951	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8952			DEFAULT_TIMEOUT);
8953	/* Warn on any failure; always free the command and buffer. */
8954	if (rc == 0 && c->err_info->CommandStatus == 0)
8955		goto done;
8956out:
8957	dev_warn(&h->pdev->dev,
8958		"error flushing cache on controller\n");
8959done:
8960	cmd_free(h, c);
8961	kfree(flush_buf);
8962}
8963
8964/* Make the controller gather fresh report-lun data on each REPORT LUNS
8965 * request, rather than returning cached results.
8966 */
8967static void hpsa_disable_rld_caching(struct ctlr_info *h)
8968{
8969 u32 *options;
8970 struct CommandList *c;
8971 int rc;
8972
8973	/* Don't bother trying to set diag options if locked up */
8974	if (unlikely(lockup_detected(h)))
8975 return;
8976
8977 options = kzalloc(sizeof(*options), GFP_KERNEL);
8978 if (!options)
8979 return;
8980
8981 c = cmd_alloc(h);
8982
8983	/* first, get the current diag options settings */
8984 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8985 RAID_CTLR_LUNID, TYPE_CMD))
8986 goto errout;
8987
8988 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8989 NO_TIMEOUT);
8990 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8991 goto errout;
8992
8993	/* Now, set the bit for disabling the RLD caching */
8994 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8995
8996 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8997 RAID_CTLR_LUNID, TYPE_CMD))
8998 goto errout;
8999
9000 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
9001 NO_TIMEOUT);
9002 if ((rc != 0) || (c->err_info->CommandStatus != 0))
9003 goto errout;
9004
9005	/* Now verify that the change took effect */
9006 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9007 RAID_CTLR_LUNID, TYPE_CMD))
9008 goto errout;
9009
9010 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
9011 NO_TIMEOUT);
9012 if ((rc != 0) || (c->err_info->CommandStatus != 0))
9013 goto errout;
9014
9015 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
9016 goto out;
9017
9018errout:
9019 dev_err(&h->pdev->dev,
9020 "Error: failed to disable report lun data caching.\n");
9021out:
9022 cmd_free(h, c);
9023 kfree(options);
9024}
9025
9026static void __hpsa_shutdown(struct pci_dev *pdev)
9027{
9028 struct ctlr_info *h;
9029
9030 h = pci_get_drvdata(pdev);
9031	/*
9032	 * Flush the controller's write cache, then quiesce and release
9033	 * interrupts so the board is left in a clean state.
9034	 */
9035 hpsa_flush_cache(h);
9036 h->access.set_intr_mask(h, HPSA_INTR_OFF);
9037 hpsa_free_irqs(h);
9038 hpsa_disable_interrupt_mode(h);
9039}
9040
9041static void hpsa_shutdown(struct pci_dev *pdev)
9042{
9043 __hpsa_shutdown(pdev);
9044 pci_disable_device(pdev);
9045}
9046
9047static void hpsa_free_device_info(struct ctlr_info *h)
9048{
9049 int i;
9050
9051 for (i = 0; i < h->ndevices; i++) {
9052 kfree(h->dev[i]);
9053 h->dev[i] = NULL;
9054 }
9055}
9056
9057static void hpsa_remove_one(struct pci_dev *pdev)
9058{
9059 struct ctlr_info *h;
9060 unsigned long flags;
9061
9062 if (pci_get_drvdata(pdev) == NULL) {
9063 dev_err(&pdev->dev, "unable to remove device\n");
9064 return;
9065 }
9066 h = pci_get_drvdata(pdev);
9067
9068	/* Mark the controller as going away so the workers stop requeueing */
9069 spin_lock_irqsave(&h->lock, flags);
9070 h->remove_in_progress = 1;
9071 spin_unlock_irqrestore(&h->lock, flags);
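	/* Wait for any already-queued monitor, rescan, or event work to finish. */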
9072 cancel_delayed_work_sync(&h->monitor_ctlr_work);
9073 cancel_delayed_work_sync(&h->rescan_ctlr_work);
9074 cancel_delayed_work_sync(&h->event_monitor_work);
9075 destroy_workqueue(h->rescan_ctlr_wq);
9076 destroy_workqueue(h->resubmit_wq);
9077 destroy_workqueue(h->monitor_ctlr_wq);
9078
9079 hpsa_delete_sas_host(h);
9080
9081	/*
9082	 * Remove the SCSI host before disabling interrupts: removal can
9083	 * trigger I/O (e.g. SYNCHRONIZE CACHE, especially with multipath),
9084	 * and those commands would hang if the controller could no longer
9085	 * complete them.
9086	 */
9087 if (h->scsi_host)
9088 scsi_remove_host(h->scsi_host);
9089
9090	/* includes hpsa_free_irqs and hpsa_disable_interrupt_mode */
9091 __hpsa_shutdown(pdev);
9092
9093 hpsa_free_device_info(h);
9094
9095 kfree(h->hba_inquiry_data);
9096 h->hba_inquiry_data = NULL;
9097 hpsa_free_ioaccel2_sg_chain_blocks(h);
9098 hpsa_free_performant_mode(h);
9099 hpsa_free_sg_chain_blocks(h);
9100 hpsa_free_cmd_pool(h);
9101 kfree(h->lastlogicals);
9102
9103
9104	/* drop the Scsi_Host reference taken by hpsa_scsi_host_alloc() */
9105 scsi_host_put(h->scsi_host);
9106 h->scsi_host = NULL;
9107
9108	/* unmap MMIO regions and release PCI resources */
9109 hpsa_free_pci_init(h);
9110
9111 free_percpu(h->lockup_detected);
9112 h->lockup_detected = NULL;
9113
9114	/* free the controller structure itself, including reply_map */
9115 hpda_free_ctlr_info(h);
9116}
9117
9118static int __maybe_unused hpsa_suspend(
9119 __attribute__((unused)) struct device *dev)
9120{
9121 return -ENOSYS;
9122}
9123
9124static int __maybe_unused hpsa_resume(
9125	__attribute__((unused)) struct device *dev)
9126{
9127 return -ENOSYS;
9128}
9129
9130static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume);
9131
9132static struct pci_driver hpsa_pci_driver = {
9133 .name = HPSA,
9134 .probe = hpsa_init_one,
9135 .remove = hpsa_remove_one,
9136 .id_table = hpsa_pci_device_id,
9137 .shutdown = hpsa_shutdown,
9138 .driver.pm = &hpsa_pm_ops,
9139};
9140
9141/*
9142 * Performant-mode support.
9143 *
9144 * The controller fetches each command from host memory with a DMA whose
9145 * length comes from a small block fetch table, indexed by a value encoded
9146 * in the command's tag.  A command's size grows with its scatter-gather
9147 * count, so calc_bucket_map() builds a lookup table mapping "number of SG
9148 * entries" to the smallest bucket big enough for such a command: a command
9149 * with i SG entries needs (i + min_blocks) fetch units, and bucket_map[i]
9150 * is set to the index of the first bucket of at least that size.
9151 */
9152
9153static void calc_bucket_map(int bucket[], int num_buckets,
9154 int nsgs, int min_blocks, u32 *bucket_map)
9155{
9156 int i, j, b, size;
9157
9158	/* Note: bucket_map must have nsgs+1 entries. */
9159 for (i = 0; i <= nsgs; i++) {
9160		/* Compute size of a command with i SG entries */
9161 size = i + min_blocks;
9162 b = num_buckets;
9163		/* Find the smallest bucket that is large enough. */
9164 for (j = 0; j < num_buckets; j++) {
9165 if (bucket[j] >= size) {
9166 b = j;
9167 break;
9168 }
9169 }
9170		/* A command with i SG entries uses bucket b. */
9171 bucket_map[i] = b;
9172 }
9173}
9174
9175/*
9176 * Put the controller into performant mode (plus ioaccel, if supported):
9177 * program the transport method, block fetch tables and reply queues.
9178 */
9179static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9180{
9181 int i;
9182 unsigned long register_value;
9183 unsigned long transMethod = CFGTBL_Trans_Performant |
9184 (trans_support & CFGTBL_Trans_use_short_tags) |
9185 CFGTBL_Trans_enable_directed_msix |
9186 (trans_support & (CFGTBL_Trans_io_accel1 |
9187 CFGTBL_Trans_io_accel2));
9188 struct access_method access = SA5_performant_access;
9189
9190
9191	/*
9192	 * This is a bit complicated.  The controller has eight BlockFetch
9193	 * registers which tell it how much of a command to DMA from host
9194	 * memory, so that it never fetches more of a command than is
9195	 * actually populated.  Each command's tag encodes which of the
9196	 * eight sizes the command fits within, and the size a command
9197	 * needs depends on how many scatter-gather entries it carries.
9198	 *
9199	 * bft[] below lists the eight bucket sizes for the standard
9200	 * transport: a 4-unit header plus room for 1, 2, 4, 6, 8, 16, 24,
9201	 * or the maximum number of SG entries.  calc_bucket_map() then
9202	 * maps every possible SG count onto the smallest adequate bucket.
9203	 *
9204	 * bft2[] plays the same role for ioaccel2 commands, which use a
9205	 * sixteen-entry table and their own header size.
9206	 */
9207 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9208#define MIN_IOACCEL2_BFT_ENTRY 5
9209#define HPSA_IOACCEL2_HEADER_SZ 4
9210 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9211 13, 14, 15, 16, 17, 18, 19,
9212 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9213 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9214 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9215 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9216 16 * MIN_IOACCEL2_BFT_ENTRY);
9217 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9218 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9219
9220
9221
9222
9223
9224	/*
9225	 * If the controller supports either ioaccel method, the RAID path
9226	 * can also use the submit routine that skips the superfluous
9227	 * readl() after each command submission.
9228	 */
9229 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9230 access = SA5_performant_access_no_read;
9231
9232	/* Per the controller spec, zero out the reply queues before use. */
9233 for (i = 0; i < h->nreply_queues; i++)
9234 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9235
9236 bft[7] = SG_ENTRIES_IN_CMD + 4;
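	/* Map every possible SG count onto one of the eight buckets. */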
9237 calc_bucket_map(bft, ARRAY_SIZE(bft),
9238 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9239 for (i = 0; i < 8; i++)
9240 writel(bft[i], &h->transtable->BlockFetch[i]);
9241
9242	/* size of controller ring buffer */
9243 writel(h->max_commands, &h->transtable->RepQSize);
9244 writel(h->nreply_queues, &h->transtable->RepQCount);
9245 writel(0, &h->transtable->RepQCtrAddrLow32);
9246 writel(0, &h->transtable->RepQCtrAddrHigh32);
9247
9248 for (i = 0; i < h->nreply_queues; i++) {
9249 writel(0, &h->transtable->RepQAddr[i].upper);
9250 writel(h->reply_queue[i].busaddr,
9251 &h->transtable->RepQAddr[i].lower);
9252 }
9253
9254 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9255 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9256
9257
9258	/* enable outbound interrupt coalescing in IOACCEL mode 1 */
9259 if (trans_support & CFGTBL_Trans_io_accel1) {
9260 access = SA5_ioaccel_mode1_access;
9261 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9262 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9263 } else
9264 if (trans_support & CFGTBL_Trans_io_accel2)
9265 access = SA5_ioaccel_mode2_access;
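	/* Ring the doorbell to request the transport change, then wait
	 * for the controller to acknowledge it. */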
9266 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9267 if (hpsa_wait_for_mode_change_ack(h)) {
9268 dev_err(&h->pdev->dev,
9269 "performant mode problem - doorbell timeout\n");
9270 return -ENODEV;
9271 }
9272 register_value = readl(&(h->cfgtable->TransportActive));
9273 if (!(register_value & CFGTBL_Trans_Performant)) {
9274 dev_err(&h->pdev->dev,
9275 "performant mode problem - transport not active\n");
9276 return -ENODEV;
9277 }
9278
9279 h->access = access;
9280 h->transMethod = transMethod;
9281
9282 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9283 (trans_support & CFGTBL_Trans_io_accel2)))
9284 return 0;
9285
9286 if (trans_support & CFGTBL_Trans_io_accel1) {
9287		/* Select each reply queue and record its starting producer index. */
9288 for (i = 0; i < h->nreply_queues; i++) {
9289 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9290 h->reply_queue[i].current_entry =
9291 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9292 }
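		/* Rebuild the block fetch table using ioaccel1 command sizes:
		 * an 8-unit header plus one unit per SG entry. */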
9293 bft[7] = h->ioaccel_maxsg + 8;
9294 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9295 h->ioaccel1_blockFetchTable);
9296
9297		/* Initialize every reply queue entry to "unused". */
9298 for (i = 0; i < h->nreply_queues; i++)
9299 memset(h->reply_queue[i].head,
9300 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9301 h->reply_queue_size);
9302
9303
9304		/* Pre-fill the constant fields of each ioaccel1 command once
9305		 * here, so the hot I/O path does not have to. */
9306 for (i = 0; i < h->nr_cmds; i++) {
9307 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9308
9309 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9310 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9311 (i * sizeof(struct ErrorInfo)));
9312 cp->err_info_len = sizeof(struct ErrorInfo);
9313 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9314 cp->host_context_flags =
9315 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9316 cp->timeout_sec = 0;
9317 cp->ReplyQueue = 0;
9318 cp->tag =
9319 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9320 cp->host_addr =
9321 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9322 (i * sizeof(struct io_accel1_cmd)));
9323 }
9324 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9325 u64 cfg_offset, cfg_base_addr_index;
9326 u32 bft2_offset, cfg_base_addr;
9327
9328 hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9329 &cfg_base_addr_index, &cfg_offset);
9330 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
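		/* Size the largest bucket for the maximum embedded SG count
		 * plus the ioaccel2 header. */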
9331 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9332 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9333 4, h->ioaccel2_blockFetchTable);
9334 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9335 BUILD_BUG_ON(offsetof(struct CfgTable,
9336 io_accel_request_size_offset) != 0xb8);
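		/* Map the ioaccel2 block fetch registers, which live at
		 * bft2_offset bytes past the start of the config table. */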
9337 h->ioaccel2_bft2_regs =
9338 remap_pci_mem(pci_resource_start(h->pdev,
9339 cfg_base_addr_index) +
9340 cfg_offset + bft2_offset,
9341 ARRAY_SIZE(bft2) *
9342 sizeof(*h->ioaccel2_bft2_regs));
9343 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9344 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9345 }
9346 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9347 if (hpsa_wait_for_mode_change_ack(h)) {
9348 dev_err(&h->pdev->dev,
9349 "performant mode problem - enabling ioaccel mode\n");
9350 return -ENODEV;
9351 }
9352 return 0;
9353}
9354
9355/* Free ioaccel1 mode command blocks and block fetch table */
9356static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9357{
9358 if (h->ioaccel_cmd_pool) {
9359 dma_free_coherent(&h->pdev->dev,
9360 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9361 h->ioaccel_cmd_pool,
9362 h->ioaccel_cmd_pool_dhandle);
9363 h->ioaccel_cmd_pool = NULL;
9364 h->ioaccel_cmd_pool_dhandle = 0;
9365 }
9366 kfree(h->ioaccel1_blockFetchTable);
9367 h->ioaccel1_blockFetchTable = NULL;
9368}
9369
9370/* Allocate ioaccel1 mode command blocks and block fetch table */
9371static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9372{
9373 h->ioaccel_maxsg =
9374 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9375 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9376 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9377
9378	/* io_accel1_cmd structures must be sized to a multiple of
9379	 * IOACCEL1_COMMANDLIST_ALIGNMENT because the hardware uses the
9380	 * low bits of the command address for its own purposes.
9381	 */
9382 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9383 IOACCEL1_COMMANDLIST_ALIGNMENT);
9384 h->ioaccel_cmd_pool =
9385 dma_alloc_coherent(&h->pdev->dev,
9386 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9387 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9388
9389 h->ioaccel1_blockFetchTable =
9390 kmalloc(((h->ioaccel_maxsg + 1) *
9391 sizeof(u32)), GFP_KERNEL);
9392
9393 if ((h->ioaccel_cmd_pool == NULL) ||
9394 (h->ioaccel1_blockFetchTable == NULL))
9395 goto clean_up;
9396
9397 memset(h->ioaccel_cmd_pool, 0,
9398 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9399 return 0;
9400
9401clean_up:
9402 hpsa_free_ioaccel1_cmd_and_bft(h);
9403 return -ENOMEM;
9404}
9405
9406/* Free ioaccel2 mode command blocks and block fetch table */
9407static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9408{
9409 hpsa_free_ioaccel2_sg_chain_blocks(h);
9410
9411 if (h->ioaccel2_cmd_pool) {
9412 dma_free_coherent(&h->pdev->dev,
9413 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9414 h->ioaccel2_cmd_pool,
9415 h->ioaccel2_cmd_pool_dhandle);
9416 h->ioaccel2_cmd_pool = NULL;
9417 h->ioaccel2_cmd_pool_dhandle = 0;
9418 }
9419 kfree(h->ioaccel2_blockFetchTable);
9420 h->ioaccel2_blockFetchTable = NULL;
9421}
9422
9423/* Allocate ioaccel2 mode command blocks and block fetch table */
9424static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9425{
9426 int rc;
9427
9428
9429
9430 h->ioaccel_maxsg =
9431 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9432 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9433 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9434
9435 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9436 IOACCEL2_COMMANDLIST_ALIGNMENT);
9437 h->ioaccel2_cmd_pool =
9438 dma_alloc_coherent(&h->pdev->dev,
9439 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9440 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9441
9442 h->ioaccel2_blockFetchTable =
9443 kmalloc(((h->ioaccel_maxsg + 1) *
9444 sizeof(u32)), GFP_KERNEL);
9445
9446 if ((h->ioaccel2_cmd_pool == NULL) ||
9447 (h->ioaccel2_blockFetchTable == NULL)) {
9448 rc = -ENOMEM;
9449 goto clean_up;
9450 }
9451
9452 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9453 if (rc)
9454 goto clean_up;
9455
9456 memset(h->ioaccel2_cmd_pool, 0,
9457 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9458 return 0;
9459
9460clean_up:
9461 hpsa_free_ioaccel2_cmd_and_bft(h);
9462 return rc;
9463}
9464
9465/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9466static void hpsa_free_performant_mode(struct ctlr_info *h)
9467{
9468 kfree(h->blockFetchTable);
9469 h->blockFetchTable = NULL;
9470 hpsa_free_reply_queues(h);
9471 hpsa_free_ioaccel1_cmd_and_bft(h);
9472 hpsa_free_ioaccel2_cmd_and_bft(h);
9473}
9474
9475
9476/* Returns -ENODEV on error, 0 on success (or if no action is needed);
9477 * allocates numerous items that must be freed later. */
9478static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9479{
9480 u32 trans_support;
9481 unsigned long transMethod = CFGTBL_Trans_Performant |
9482 CFGTBL_Trans_use_short_tags;
9483 int i, rc;
9484
9485 if (hpsa_simple_mode)
9486 return 0;
9487
9488 trans_support = readl(&(h->cfgtable->TransportSupport));
9489 if (!(trans_support & PERFORMANT_MODE))
9490 return 0;
9491
9492	/* If supported, prefer ioaccel1, then ioaccel2. */
9493 if (trans_support & CFGTBL_Trans_io_accel1) {
9494 transMethod |= CFGTBL_Trans_io_accel1 |
9495 CFGTBL_Trans_enable_directed_msix;
9496 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9497 if (rc)
9498 return rc;
9499 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9500 transMethod |= CFGTBL_Trans_io_accel2 |
9501 CFGTBL_Trans_enable_directed_msix;
9502 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9503 if (rc)
9504 return rc;
9505 }
9506
9507 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9508 hpsa_get_max_perf_mode_cmds(h);
9509	/* Each reply queue entry is a 64-bit command tag. */
9510 h->reply_queue_size = h->max_commands * sizeof(u64);
9511
9512 for (i = 0; i < h->nreply_queues; i++) {
9513 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9514 h->reply_queue_size,
9515 &h->reply_queue[i].busaddr,
9516 GFP_KERNEL);
9517 if (!h->reply_queue[i].head) {
9518 rc = -ENOMEM;
9519 goto clean1;
9520 }
9521 h->reply_queue[i].size = h->max_commands;
9522 h->reply_queue[i].wraparound = 1;
9523 h->reply_queue[i].current_entry = 0;
9524 }
9525
9526	/* Need a block fetch table for performant mode */
9527 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9528 sizeof(u32)), GFP_KERNEL);
9529 if (!h->blockFetchTable) {
9530 rc = -ENOMEM;
9531 goto clean1;
9532 }
9533
9534 rc = hpsa_enter_performant_mode(h, trans_support);
9535 if (rc)
9536 goto clean2;
9537 return 0;
9538
9539clean2:
9540 kfree(h->blockFetchTable);
9541 h->blockFetchTable = NULL;
9542clean1:
9543 hpsa_free_reply_queues(h);
9544 hpsa_free_ioaccel1_cmd_and_bft(h);
9545 hpsa_free_ioaccel2_cmd_and_bft(h);
9546 return rc;
9547}
9548
9549static int is_accelerated_cmd(struct CommandList *c)
9550{
9551 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9552}
9553
9554static void hpsa_drain_accel_commands(struct ctlr_info *h)
9555{
9556 struct CommandList *c = NULL;
9557 int i, accel_cmds_out;
9558 int refcount;
9559
9560 do {
9561 accel_cmds_out = 0;
9562 for (i = 0; i < h->nr_cmds; i++) {
9563 c = h->cmd_pool + i;
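			/* refcount > 1 means the slot holds an outstanding
			 * command; count it if it went down an ioaccel path. */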
9564 refcount = atomic_inc_return(&c->refcount);
9565 if (refcount > 1)
9566 accel_cmds_out += is_accelerated_cmd(c);
9567 cmd_free(h, c);
9568 }
9569 if (accel_cmds_out <= 0)
9570 break;
9571 msleep(100);
9572 } while (1);
9573}
9574
9575static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9576 struct hpsa_sas_port *hpsa_sas_port)
9577{
9578 struct hpsa_sas_phy *hpsa_sas_phy;
9579 struct sas_phy *phy;
9580
9581 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9582 if (!hpsa_sas_phy)
9583 return NULL;
9584
9585 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9586 hpsa_sas_port->next_phy_index);
9587 if (!phy) {
9588 kfree(hpsa_sas_phy);
9589 return NULL;
9590 }
9591
9592 hpsa_sas_port->next_phy_index++;
9593 hpsa_sas_phy->phy = phy;
9594 hpsa_sas_phy->parent_port = hpsa_sas_port;
9595
9596 return hpsa_sas_phy;
9597}
9598
9599static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9600{
9601 struct sas_phy *phy = hpsa_sas_phy->phy;
9602
9603 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9604 if (hpsa_sas_phy->added_to_port)
9605 list_del(&hpsa_sas_phy->phy_list_entry);
9606 sas_phy_delete(phy);
9607 kfree(hpsa_sas_phy);
9608}
9609
9610static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9611{
9612 int rc;
9613 struct hpsa_sas_port *hpsa_sas_port;
9614 struct sas_phy *phy;
9615 struct sas_identify *identify;
9616
9617 hpsa_sas_port = hpsa_sas_phy->parent_port;
9618 phy = hpsa_sas_phy->phy;
9619
9620 identify = &phy->identify;
9621 memset(identify, 0, sizeof(*identify));
9622 identify->sas_address = hpsa_sas_port->sas_address;
9623 identify->device_type = SAS_END_DEVICE;
9624 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9625 identify->target_port_protocols = SAS_PROTOCOL_STP;
9626 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9627 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9628 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9629 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9630 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9631
9632 rc = sas_phy_add(hpsa_sas_phy->phy);
9633 if (rc)
9634 return rc;
9635
9636 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9637 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9638 &hpsa_sas_port->phy_list_head);
9639 hpsa_sas_phy->added_to_port = true;
9640
9641 return 0;
9642}
9643
9644static int
9645 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9646 struct sas_rphy *rphy)
9647{
9648 struct sas_identify *identify;
9649
9650 identify = &rphy->identify;
9651 identify->sas_address = hpsa_sas_port->sas_address;
9652 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9653 identify->target_port_protocols = SAS_PROTOCOL_STP;
9654
9655 return sas_rphy_add(rphy);
9656}
9657
9658static struct hpsa_sas_port
9659 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9660 u64 sas_address)
9661{
9662 int rc;
9663 struct hpsa_sas_port *hpsa_sas_port;
9664 struct sas_port *port;
9665
9666 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9667 if (!hpsa_sas_port)
9668 return NULL;
9669
9670 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9671 hpsa_sas_port->parent_node = hpsa_sas_node;
9672
9673 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9674 if (!port)
9675 goto free_hpsa_port;
9676
9677 rc = sas_port_add(port);
9678 if (rc)
9679 goto free_sas_port;
9680
9681 hpsa_sas_port->port = port;
9682 hpsa_sas_port->sas_address = sas_address;
9683 list_add_tail(&hpsa_sas_port->port_list_entry,
9684 &hpsa_sas_node->port_list_head);
9685
9686 return hpsa_sas_port;
9687
9688free_sas_port:
9689 sas_port_free(port);
9690free_hpsa_port:
9691 kfree(hpsa_sas_port);
9692
9693 return NULL;
9694}
9695
9696static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9697{
9698 struct hpsa_sas_phy *hpsa_sas_phy;
9699 struct hpsa_sas_phy *next;
9700
9701 list_for_each_entry_safe(hpsa_sas_phy, next,
9702 &hpsa_sas_port->phy_list_head, phy_list_entry)
9703 hpsa_free_sas_phy(hpsa_sas_phy);
9704
9705 sas_port_delete(hpsa_sas_port->port);
9706 list_del(&hpsa_sas_port->port_list_entry);
9707 kfree(hpsa_sas_port);
9708}
9709
9710static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9711{
9712 struct hpsa_sas_node *hpsa_sas_node;
9713
9714 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9715 if (hpsa_sas_node) {
9716 hpsa_sas_node->parent_dev = parent_dev;
9717 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9718 }
9719
9720 return hpsa_sas_node;
9721}
9722
9723static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9724{
9725 struct hpsa_sas_port *hpsa_sas_port;
9726 struct hpsa_sas_port *next;
9727
9728 if (!hpsa_sas_node)
9729 return;
9730
9731 list_for_each_entry_safe(hpsa_sas_port, next,
9732 &hpsa_sas_node->port_list_head, port_list_entry)
9733 hpsa_free_sas_port(hpsa_sas_port);
9734
9735 kfree(hpsa_sas_node);
9736}
9737
9738static struct hpsa_scsi_dev_t
9739 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9740 struct sas_rphy *rphy)
9741{
9742 int i;
9743 struct hpsa_scsi_dev_t *device;
9744
9745 for (i = 0; i < h->ndevices; i++) {
9746 device = h->dev[i];
9747 if (!device->sas_port)
9748 continue;
9749 if (device->sas_port->rphy == rphy)
9750 return device;
9751 }
9752
9753 return NULL;
9754}
9755
9756static int hpsa_add_sas_host(struct ctlr_info *h)
9757{
9758 int rc;
9759 struct device *parent_dev;
9760 struct hpsa_sas_node *hpsa_sas_node;
9761 struct hpsa_sas_port *hpsa_sas_port;
9762 struct hpsa_sas_phy *hpsa_sas_phy;
9763
9764 parent_dev = &h->scsi_host->shost_dev;
9765
9766 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9767 if (!hpsa_sas_node)
9768 return -ENOMEM;
9769
9770 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9771 if (!hpsa_sas_port) {
9772 rc = -ENODEV;
9773 goto free_sas_node;
9774 }
9775
9776 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9777 if (!hpsa_sas_phy) {
9778 rc = -ENODEV;
9779 goto free_sas_port;
9780 }
9781
9782 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9783 if (rc)
9784 goto free_sas_phy;
9785
9786 h->sas_host = hpsa_sas_node;
9787
9788 return 0;
9789
9790free_sas_phy:
9791 hpsa_free_sas_phy(hpsa_sas_phy);
9792free_sas_port:
9793 hpsa_free_sas_port(hpsa_sas_port);
9794free_sas_node:
9795 hpsa_free_sas_node(hpsa_sas_node);
9796
9797 return rc;
9798}
9799
9800static void hpsa_delete_sas_host(struct ctlr_info *h)
9801{
9802 hpsa_free_sas_node(h->sas_host);
9803}
9804
9805static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9806 struct hpsa_scsi_dev_t *device)
9807{
9808 int rc;
9809 struct hpsa_sas_port *hpsa_sas_port;
9810 struct sas_rphy *rphy;
9811
9812 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9813 if (!hpsa_sas_port)
9814 return -ENOMEM;
9815
9816 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9817 if (!rphy) {
9818 rc = -ENODEV;
9819 goto free_sas_port;
9820 }
9821
9822 hpsa_sas_port->rphy = rphy;
9823 device->sas_port = hpsa_sas_port;
9824
9825 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9826 if (rc)
9827 goto free_sas_port;
9828
9829 return 0;
9830
9831free_sas_port:
9832 hpsa_free_sas_port(hpsa_sas_port);
9833 device->sas_port = NULL;
9834
9835 return rc;
9836}
9837
9838static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9839{
9840 if (device->sas_port) {
9841 hpsa_free_sas_port(device->sas_port);
9842 device->sas_port = NULL;
9843 }
9844}
9845
9846static int
9847hpsa_sas_get_linkerrors(struct sas_phy *phy)
9848{
9849 return 0;
9850}
9851
9852static int
9853hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9854{
9855 struct Scsi_Host *shost = phy_to_shost(rphy);
9856 struct ctlr_info *h;
9857 struct hpsa_scsi_dev_t *sd;
9858
9859 if (!shost)
9860 return -ENXIO;
9861
9862 h = shost_to_hba(shost);
9863
9864 if (!h)
9865 return -ENXIO;
9866
9867 sd = hpsa_find_device_by_sas_rphy(h, rphy);
9868 if (!sd)
9869 return -ENXIO;
9870
9871 *identifier = sd->eli;
9872
9873 return 0;
9874}
9875
9876static int
9877hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9878{
9879 return -ENXIO;
9880}
9881
9882static int
9883hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9884{
9885 return 0;
9886}
9887
9888static int
9889hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9890{
9891 return 0;
9892}
9893
9894static int
9895hpsa_sas_phy_setup(struct sas_phy *phy)
9896{
9897 return 0;
9898}
9899
9900static void
9901hpsa_sas_phy_release(struct sas_phy *phy)
9902{
9903}
9904
9905static int
9906hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9907{
9908 return -EINVAL;
9909}
9910
9911static struct sas_function_template hpsa_sas_transport_functions = {
9912 .get_linkerrors = hpsa_sas_get_linkerrors,
9913 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9914 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9915 .phy_reset = hpsa_sas_phy_reset,
9916 .phy_enable = hpsa_sas_phy_enable,
9917 .phy_setup = hpsa_sas_phy_setup,
9918 .phy_release = hpsa_sas_phy_release,
9919 .set_phy_speed = hpsa_sas_phy_speed,
9920};
9921
9922
9923/*
9924 * This is it.  Register the PCI driver for all the Smart Array boards.
9925 */
9926static int __init hpsa_init(void)
9927{
9928 int rc;
9929
9930 hpsa_sas_transport_template =
9931 sas_attach_transport(&hpsa_sas_transport_functions);
9932 if (!hpsa_sas_transport_template)
9933 return -ENODEV;
9934
9935 rc = pci_register_driver(&hpsa_pci_driver);
9936
9937 if (rc)
9938 sas_release_transport(hpsa_sas_transport_template);
9939
9940 return rc;
9941}
9942
9943static void __exit hpsa_cleanup(void)
9944{
9945 pci_unregister_driver(&hpsa_pci_driver);
9946 sas_release_transport(hpsa_sas_transport_template);
9947}
9948
9949static void __attribute__((unused)) verify_offsets(void)
9950{
9951#define VERIFY_OFFSET(member, offset) \
9952 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9953
9954 VERIFY_OFFSET(structure_size, 0);
9955 VERIFY_OFFSET(volume_blk_size, 4);
9956 VERIFY_OFFSET(volume_blk_cnt, 8);
9957 VERIFY_OFFSET(phys_blk_shift, 16);
9958 VERIFY_OFFSET(parity_rotation_shift, 17);
9959 VERIFY_OFFSET(strip_size, 18);
9960 VERIFY_OFFSET(disk_starting_blk, 20);
9961 VERIFY_OFFSET(disk_blk_cnt, 28);
9962 VERIFY_OFFSET(data_disks_per_row, 36);
9963 VERIFY_OFFSET(metadata_disks_per_row, 38);
9964 VERIFY_OFFSET(row_cnt, 40);
9965 VERIFY_OFFSET(layout_map_count, 42);
9966 VERIFY_OFFSET(flags, 44);
9967 VERIFY_OFFSET(dekindex, 46);
9968
9969 VERIFY_OFFSET(data, 64);
9970
9971#undef VERIFY_OFFSET
9972
9973#define VERIFY_OFFSET(member, offset) \
9974 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9975
9976 VERIFY_OFFSET(IU_type, 0);
9977 VERIFY_OFFSET(direction, 1);
9978 VERIFY_OFFSET(reply_queue, 2);
9979
9980 VERIFY_OFFSET(scsi_nexus, 4);
9981 VERIFY_OFFSET(Tag, 8);
9982 VERIFY_OFFSET(cdb, 16);
9983 VERIFY_OFFSET(cciss_lun, 32);
9984 VERIFY_OFFSET(data_len, 40);
9985 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9986 VERIFY_OFFSET(sg_count, 45);
9987
9988 VERIFY_OFFSET(err_ptr, 48);
9989 VERIFY_OFFSET(err_len, 56);
9990
9991 VERIFY_OFFSET(sg, 64);
9992
9993#undef VERIFY_OFFSET
9994
9995#define VERIFY_OFFSET(member, offset) \
9996 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9997
9998 VERIFY_OFFSET(dev_handle, 0x00);
9999 VERIFY_OFFSET(reserved1, 0x02);
10000 VERIFY_OFFSET(function, 0x03);
10001 VERIFY_OFFSET(reserved2, 0x04);
10002 VERIFY_OFFSET(err_info, 0x0C);
10003 VERIFY_OFFSET(reserved3, 0x10);
10004 VERIFY_OFFSET(err_info_len, 0x12);
10005 VERIFY_OFFSET(reserved4, 0x13);
10006 VERIFY_OFFSET(sgl_offset, 0x14);
10007 VERIFY_OFFSET(reserved5, 0x15);
10008 VERIFY_OFFSET(transfer_len, 0x1C);
10009 VERIFY_OFFSET(reserved6, 0x20);
10010 VERIFY_OFFSET(io_flags, 0x24);
10011 VERIFY_OFFSET(reserved7, 0x26);
10012 VERIFY_OFFSET(LUN, 0x34);
10013 VERIFY_OFFSET(control, 0x3C);
10014 VERIFY_OFFSET(CDB, 0x40);
10015 VERIFY_OFFSET(reserved8, 0x50);
10016 VERIFY_OFFSET(host_context_flags, 0x60);
10017 VERIFY_OFFSET(timeout_sec, 0x62);
10018 VERIFY_OFFSET(ReplyQueue, 0x64);
10019 VERIFY_OFFSET(reserved9, 0x65);
10020 VERIFY_OFFSET(tag, 0x68);
10021 VERIFY_OFFSET(host_addr, 0x70);
10022 VERIFY_OFFSET(CISS_LUN, 0x78);
10023 VERIFY_OFFSET(SG, 0x78 + 8);
10024#undef VERIFY_OFFSET
10025}
10026
10027module_init(hpsa_init);
10028module_exit(hpsa_cleanup);
10029