#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* How many times to retry a command because of bus resets */
#define MAX_CMD_RETRIES 3

MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

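/*
 * board_id below is (subsystem device id << 16) | subsystem vendor id,
 * so e.g. 0x3241103C corresponds to the 0x103C/0x3241 entry in the PCI
 * table above; "access" selects the register access method for the board.
 */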
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array", &SA5_access},
	{0x1921103C, "Smart Array", &SA5_access},
	{0x1922103C, "Smart Array", &SA5_access},
	{0x1923103C, "Smart Array", &SA5_access},
	{0x1924103C, "Smart Array", &SA5_access},
	{0x1925103C, "Smart Array", &SA5_access},
	{0x1926103C, "Smart Array", &SA5_access},
	{0x1928103C, "Smart Array", &SA5_access},
	{0x334d103C, "Smart Array P822se", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);

/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

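/*
 * The Scsi_Host private area holds a single pointer back to our
 * struct ctlr_info (stashed in sh->hostdata[0] when the host is
 * registered in hpsa_register_scsi() below); these helpers recover it
 * from a scsi_device or a Scsi_Host.
 */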
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
	 * external target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C,
	0x324b103C,
	0x3223103C,
	0x3234103C,
	0x3235103C,
	0x3211103C,
	0x3212103C,
	0x3213103C,
	0x3214103C,
	0x3215103C,
	0x3237103C,
	0x323D103C,
	0x40800E11,
	0x409C0E11,
	0x409D0E11,
	0x40700E11,
	0x40820E11,
	0x40830E11,
	0x409A0E11,
	0x409B0E11,
	0x40910E11,
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11,
	0x40700E11,
	0x40820E11,
	0x40830E11,
	0x409A0E11,
	0x409B0E11,
	0x40910E11,
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that
	 * controls it.  If we reset the one controlling the cache, the
	 * other will likely not be happy.  Just forbid resetting this
	 * conjoined mess.
	 */
	0x409C0E11,
	0x409D0E11,
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

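/*
 * Note: in the 8-byte CISS LUN address, the top two bits of byte 3
 * carry the address mode; mode 0x40 (01b) denotes a logical volume,
 * which is what is_logical_dev_addr_mode() tests for.
 */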
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
};

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

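/*
 * next_command() pulls the next completed command tag off a
 * performant-mode reply queue.  Each entry carries a toggle bit in
 * bit 0 which flips each time the circular queue wraps, so an entry
 * is only valid while its toggle bit matches rq->wraparound.
 */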
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
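/*
 * set_performant_mode: modify the tag for cciss performant: set bit 0
 * for pull model, and the bits above it to select the block fetch
 * table entry (keyed by SG count) so the controller knows how much of
 * the command to fetch; with MSI-X, replies are spread across the
 * reply queues based on the submitting CPU.
 */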
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector))
			c->Header.ReplyQueue =
				smp_processor_id() % h->nreply_queues;
	}
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
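/*
 * During firmware flash, the heartbeat register may not update as
 * frequently as it should, so we dial down lockup detection for the
 * duration of the flash and dial it back up when the flash completes.
 */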
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, bus no, and target no.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return
 * needle location in *index.  If the entry matches but some attributes
 * have changed, return DEVICE_UPDATED.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* entry removed, so don't increment i */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

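/*
 * A command embeds up to h->max_cmd_sg_entries SG descriptors.  For
 * larger transfers, the last embedded descriptor is converted into a
 * chain pointer (Ext = HPSA_SG_CHAIN) to a separately DMA-mapped,
 * per-command block from h->cmd_sg_list[] holding the remaining
 * descriptors.
 */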
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
		PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

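/*
 * complete_scsi_command() translates the controller's ErrorInfo for a
 * finished command into midlayer result codes: sense data is copied
 * out, CHECK CONDITION sense keys are special-cased, and each
 * controller CommandStatus is mapped to a DID_* host byte.
 */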
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries) /* chained */
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data, truncating to whichever buffer is smaller */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported
				 * on Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {	/* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);
			/* Ordinarily, this case should never happen,
			 * but some firmware revisions allow it if, for
			 * example, a backplane loses power while a tape
			 * drive is in it.  We assume that it's a fatal
			 * error of some kind because we can't show that
			 * it wasn't, and we make it look like selection
			 * timeout since that is the most common reason
			 * for this to occur, and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* We get CMD_INVALID if you address a non-existent
		 * device instead of a selection timeout (no response).
		 * You will see this if you yank out a drive, then try
		 * to access it.  This is kind of a shame because it
		 * means that any other CMD_INVALID (e.g. driver bug)
		 * will get interpreted as a missing target.
		 */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

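/*
 * Send a command to the controller and sleep on an on-stack completion
 * until the interrupt handler marks it done; only safe to call from
 * process context.
 */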
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}

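/*
 * Retry a command while it keeps coming back with unit attention or
 * busy status: the first few retries are immediate, after which we
 * back off exponentially from 10ms up to a 1 second cap.
 */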
#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
			ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN:
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
			ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
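	/*
	 * OBDR ("One Button Disaster Recovery") tape devices advertise a
	 * "$DR-10" signature at offset 43 of their standard inquiry data,
	 * so the inquiry buffer is sized just large enough to cover it.
	 */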

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
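/*
 * Derive bus/target/lun from the 8-byte CISS LUN address: the hba
 * device itself goes on bus 3 and other physical devices on bus 2
 * (target and lun deferred); logical volumes behind external
 * (MSA2xxx-style) targets go on bus 1 with target/lun unpacked from
 * the address; and direct-attached logical volumes go on bus 0.
 */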
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box reports.
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
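/*
 * External target lun 0 handling: an external target's enclosure
 * device at LUN 0 doesn't show up in the report-luns data, so when we
 * see a non-zero lun on such a target we synthesize a LUN 0 entry for
 * it (once per target, tracked in lunzerobits).  Returns 1 if an
 * entry was consumed from the device list, 0 otherwise.
 */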
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* SCSI rev 5 controllers don't need this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
		tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
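/*
 * Gather the current lists of physical and logical LUNs from the
 * controller via CISS report-physical/report-logical commands, and
 * clamp the counts to what the driver can track.
 */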
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical
	 * disks plus external target devices, and a device for the local
	 * RAID controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
			&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0
		 * which doesn't show up in the report-luns data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that, otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals)
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smart Array HBA as a RAID
			 * controller.  If it's a RAID controller other
			 * than the HBA itself (an external RAID
			 * controller, MSA500 or similar), don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}
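/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the
 * dma mapping and fills in the scatter gather entries of the hpsa
 * command, cp, chaining to a separate SG block when the request has
 * more segments than fit in the command itself.
 */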
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
		curr_sg->Ext = 0;  /* we are not chaining */
		curr_sg++;
	}

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = (u16) (use_sg + 1);
		hpsa_map_sg_chain_block(h, cp);
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
	return 0;
}
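/*
 * queuecommand: build a CommandList for the midlayer scsi_cmnd, map
 * its data for DMA, and hand it to the controller.  DEF_SCSI_QCMD
 * (below) wraps the _lck variant with the host-lock handling the
 * midlayer expects.
 */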
2120static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2121 void (*done)(struct scsi_cmnd *))
2122{
2123 struct ctlr_info *h;
2124 struct hpsa_scsi_dev_t *dev;
2125 unsigned char scsi3addr[8];
2126 struct CommandList *c;
2127 unsigned long flags;
2128
2129
2130 h = sdev_to_hba(cmd->device);
2131 dev = cmd->device->hostdata;
2132 if (!dev) {
2133 cmd->result = DID_NO_CONNECT << 16;
2134 done(cmd);
2135 return 0;
2136 }
2137 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2138
2139 spin_lock_irqsave(&h->lock, flags);
2140 if (unlikely(h->lockup_detected)) {
2141 spin_unlock_irqrestore(&h->lock, flags);
2142 cmd->result = DID_ERROR << 16;
2143 done(cmd);
2144 return 0;
2145 }
2146 spin_unlock_irqrestore(&h->lock, flags);
2147 c = cmd_alloc(h);
2148 if (c == NULL) {
2149 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2150 return SCSI_MLQUEUE_HOST_BUSY;
2151 }
2152
2153
2154
2155 cmd->scsi_done = done;
2156
2157
2158 cmd->host_scribble = (unsigned char *) c;
2159
2160 c->cmd_type = CMD_SCSI;
2161 c->scsi_cmd = cmd;
2162 c->Header.ReplyQueue = 0;
2163 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2164 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2165 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
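
	/* Fill in the request block. */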
2169 c->Request.Timeout = 0;
2170 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2171 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2172 c->Request.CDBLen = cmd->cmd_len;
2173 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2174 c->Request.Type.Type = TYPE_CMD;
2175 c->Request.Type.Attribute = ATTR_SIMPLE;
2176 switch (cmd->sc_data_direction) {
2177 case DMA_TO_DEVICE:
2178 c->Request.Type.Direction = XFER_WRITE;
2179 break;
2180 case DMA_FROM_DEVICE:
2181 c->Request.Type.Direction = XFER_READ;
2182 break;
2183 case DMA_NONE:
2184 c->Request.Type.Direction = XFER_NONE;
2185 break;
2186 case DMA_BIDIRECTIONAL:
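		/*
		 * DMA_BIDIRECTIONAL can show up here, e.g. from a SCSI
		 * passthrough that sets both input and output lengths.
		 * There is no matching transfer type, so use XFER_RSVD,
		 * which the hardware appears to tolerate.
		 */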
2192 c->Request.Type.Direction = XFER_RSVD;
2201 break;
2202
2203 default:
2204 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2205 cmd->sc_data_direction);
2206 BUG();
2207 break;
2208 }
2209
2210 if (hpsa_scatter_gather(h, c, cmd) < 0) {
2211 cmd_free(h, c);
2212 return SCSI_MLQUEUE_HOST_BUSY;
2213 }
2214 enqueue_cmd_and_start_io(h, c);
2215
2216 return 0;
2217}
2218
2219static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2220
2221static void hpsa_scan_start(struct Scsi_Host *sh)
2222{
2223 struct ctlr_info *h = shost_to_hba(sh);
2224 unsigned long flags;
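
	/* Wait until any scan already in progress is finished. */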
2227 while (1) {
2228 spin_lock_irqsave(&h->scan_lock, flags);
2229 if (h->scan_finished)
2230 break;
2231 spin_unlock_irqrestore(&h->scan_lock, flags);
2232 wait_event(h->scan_wait_queue, h->scan_finished);
2238 }
2239 h->scan_finished = 0;
2240 spin_unlock_irqrestore(&h->scan_lock, flags);
2241
2242 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2243
2244 spin_lock_irqsave(&h->scan_lock, flags);
2245 h->scan_finished = 1;
2246 wake_up_all(&h->scan_wait_queue);
2247 spin_unlock_irqrestore(&h->scan_lock, flags);
2248}
2249
2250static int hpsa_scan_finished(struct Scsi_Host *sh,
2251 unsigned long elapsed_time)
2252{
2253 struct ctlr_info *h = shost_to_hba(sh);
2254 unsigned long flags;
2255 int finished;
2256
2257 spin_lock_irqsave(&h->scan_lock, flags);
2258 finished = h->scan_finished;
2259 spin_unlock_irqrestore(&h->scan_lock, flags);
2260 return finished;
2261}
2262
2263static int hpsa_change_queue_depth(struct scsi_device *sdev,
2264 int qdepth, int reason)
2265{
2266 struct ctlr_info *h = sdev_to_hba(sdev);
2267
2268 if (reason != SCSI_QDEPTH_DEFAULT)
2269 return -ENOTSUPP;
2270
	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > h->nr_cmds)
		qdepth = h->nr_cmds;
2276 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2277 return sdev->queue_depth;
2278}
2279
2280static void hpsa_unregister_scsi(struct ctlr_info *h)
2281{
2282
2283 scsi_remove_host(h->scsi_host);
2284 scsi_host_put(h->scsi_host);
2285 h->scsi_host = NULL;
2286}
2287
2288static int hpsa_register_scsi(struct ctlr_info *h)
2289{
2290 struct Scsi_Host *sh;
2291 int error;
2292
2293 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2294 if (sh == NULL)
2295 goto fail;
2296
2297 sh->io_port = 0;
2298 sh->n_io_port = 0;
2299 sh->this_id = -1;
2300 sh->max_channel = 3;
2301 sh->max_cmd_len = MAX_COMMAND_SIZE;
2302 sh->max_lun = HPSA_MAX_LUN;
2303 sh->max_id = HPSA_MAX_LUN;
2304 sh->can_queue = h->nr_cmds;
2305 sh->cmd_per_lun = h->nr_cmds;
2306 sh->sg_tablesize = h->maxsgentries;
2307 h->scsi_host = sh;
2308 sh->hostdata[0] = (unsigned long) h;
2309 sh->irq = h->intr[h->intr_mode];
2310 sh->unique_id = sh->irq;
2311 error = scsi_add_host(sh, &h->pdev->dev);
2312 if (error)
2313 goto fail_host_put;
2314 scsi_scan_host(sh);
2315 return 0;
2316
2317 fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
		__func__, h->ctlr);
2320 scsi_host_put(sh);
2321 return error;
2322 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
		__func__, h->ctlr);
2325 return -ENOMEM;
2326}
2327
2328static int wait_for_device_to_become_ready(struct ctlr_info *h,
2329 unsigned char lunaddr[])
2330{
2331 int rc = 0;
2332 int count = 0;
2333 int waittime = 1;
2334 struct CommandList *c;
2335
2336 c = cmd_special_alloc(h);
2337 if (!c) {
		dev_warn(&h->pdev->dev,
			"out of memory in wait_for_device_to_become_ready.\n");
2340 return IO_ERROR;
2341 }
2342
2343
2344 while (count < HPSA_TUR_RETRY_LIMIT) {
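		/*
		 * Wait first; a TUR sent right after the reset would
		 * likely just be aborted by it.
		 */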
2349 msleep(1000 * waittime);
		count++;
		rc = 0; /* assume ready unless this iteration proves otherwise */
2351
2352
2353 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2354 waittime = waittime * 2;
2355
2356
2357 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2358 hpsa_scsi_do_simple_cmd_core(h, c);
2359
2360
2361 if (c->err_info->CommandStatus == CMD_SUCCESS)
2362 break;
2363
2364 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2365 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2366 (c->err_info->SenseInfo[2] == NO_SENSE ||
2367 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2368 break;
2369
2370 dev_warn(&h->pdev->dev, "waiting %d secs "
2371 "for device to become ready.\n", waittime);
2372 rc = 1;
2373 }
2374
2375 if (rc)
2376 dev_warn(&h->pdev->dev, "giving up on device.\n");
2377 else
2378 dev_warn(&h->pdev->dev, "device is ready.\n");
2379
2380 cmd_special_free(h, c);
2381 return rc;
2382}
2383
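/* Device reset handler: reset the target and wait for it to become ready. */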
2387static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2388{
2389 int rc;
2390 struct ctlr_info *h;
2391 struct hpsa_scsi_dev_t *dev;
2392
2393
2394 h = sdev_to_hba(scsicmd->device);
2395 if (h == NULL)
2396 return FAILED;
2397 dev = scsicmd->device->hostdata;
2398 if (!dev) {
2399 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2400 "device lookup failed.\n");
2401 return FAILED;
2402 }
2403 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2404 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2405
2406 rc = hpsa_send_reset(h, dev->scsi3addr);
2407 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2408 return SUCCESS;
2409
2410 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2411 return FAILED;
2412}
2413
2414static void swizzle_abort_tag(u8 *tag)
2415{
2416 u8 original_tag[8];
2417
2418 memcpy(original_tag, tag, 8);
2419 tag[0] = original_tag[3];
2420 tag[1] = original_tag[2];
2421 tag[2] = original_tag[1];
2422 tag[3] = original_tag[0];
2423 tag[4] = original_tag[7];
2424 tag[5] = original_tag[6];
2425 tag[6] = original_tag[5];
2426 tag[7] = original_tag[4];
2427}
2428
2429static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2430 struct CommandList *abort, int swizzle)
2431{
2432 int rc = IO_OK;
2433 struct CommandList *c;
2434 struct ErrorInfo *ei;
2435
2436 c = cmd_special_alloc(h);
2437 if (c == NULL) {
2438 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2439 return -ENOMEM;
2440 }
2441
2442 fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
2443 if (swizzle)
2444 swizzle_abort_tag(&c->Request.CDB[4]);
2445 hpsa_scsi_do_simple_cmd_core(h, c);
2446 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
2447 __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
2448
2449
2450 ei = c->err_info;
2451 switch (ei->CommandStatus) {
2452 case CMD_SUCCESS:
2453 break;
2454 case CMD_UNABORTABLE:
2455 rc = -1;
2456 break;
2457 default:
2458 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
2459 __func__, abort->Header.Tag.upper,
2460 abort->Header.Tag.lower);
2461 hpsa_scsi_interpret_error(c);
2462 rc = -1;
2463 break;
2464 }
2465 cmd_special_free(h, c);
2466 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
2467 abort->Header.Tag.upper, abort->Header.Tag.lower);
2468 return rc;
2469}
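
/*
 * hpsa_find_cmd_in_queue: scan queue_head for the CommandList whose
 * scsi_cmd matches 'find'; return it, or NULL if it is not queued.
 */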
2483static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
2484 struct scsi_cmnd *find, struct list_head *queue_head)
2485{
2486 unsigned long flags;
2487 struct CommandList *c = NULL;
2488
2489 if (!find)
		return NULL;
2491 spin_lock_irqsave(&h->lock, flags);
2492 list_for_each_entry(c, queue_head, list) {
2493 if (c->scsi_cmd == NULL)
2494 continue;
2495 if (c->scsi_cmd == find) {
2496 spin_unlock_irqrestore(&h->lock, flags);
2497 return c;
2498 }
2499 }
2500 spin_unlock_irqrestore(&h->lock, flags);
2501 return NULL;
2502}
2503
2504static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
2505 u8 *tag, struct list_head *queue_head)
2506{
2507 unsigned long flags;
2508 struct CommandList *c;
2509
2510 spin_lock_irqsave(&h->lock, flags);
2511 list_for_each_entry(c, queue_head, list) {
2512 if (memcmp(&c->Header.Tag, tag, 8) != 0)
2513 continue;
2514 spin_unlock_irqrestore(&h->lock, flags);
2515 return c;
2516 }
2517 spin_unlock_irqrestore(&h->lock, flags);
2518 return NULL;
2519}
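
/*
 * Some Smart Arrays need the abort tag byte-swapped and some do not,
 * and there is no easy way to tell which, so send the abort both ways.
 */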
2527static int hpsa_send_abort_both_ways(struct ctlr_info *h,
2528 unsigned char *scsi3addr, struct CommandList *abort)
2529{
2530 u8 swizzled_tag[8];
2531 struct CommandList *c;
2532 int rc = 0, rc2 = 0;
2533
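	/*
	 * Sanity check: a command with the byte-swapped tag should never
	 * already be outstanding; if one is, only the unswizzled abort
	 * can safely be sent.
	 */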
2538 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
2539 swizzle_abort_tag(swizzled_tag);
2540 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
2541 if (c != NULL) {
2542 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
2543 return hpsa_send_abort(h, scsi3addr, abort, 0);
2544 }
2545 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
2546
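	/*
	 * If the command is still outstanding after the unswizzled
	 * abort, retry with the swizzled tag.
	 */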
2551 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
2552 if (c)
2553 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
2554 return rc && rc2;
2555}
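
/* Abort handler for the SCSI midlayer's error-handling path. */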
2561static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2562{
2563
2564 int i, rc;
2565 struct ctlr_info *h;
2566 struct hpsa_scsi_dev_t *dev;
2567 struct CommandList *abort;
2568 struct CommandList *found;
2569 struct scsi_cmnd *as;
2570 char msg[256];
2571 int ml = 0;
2572
2573
2574 h = sdev_to_hba(sc->device);
2575 if (WARN(h == NULL,
2576 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
2577 return FAILED;
2578
2579
2580 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
2581 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
2582 return FAILED;
2583
2584 memset(msg, 0, sizeof(msg));
2585 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
2586 h->scsi_host->host_no, sc->device->channel,
2587 sc->device->id, sc->device->lun);
2588
2589
2590 dev = sc->device->hostdata;
2591 if (!dev) {
2592 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
2593 msg);
2594 return FAILED;
2595 }
2596
2597
2598 abort = (struct CommandList *) sc->host_scribble;
2599 if (abort == NULL) {
2600 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
2601 msg);
2602 return FAILED;
2603 }
2604
2605 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
2606 abort->Header.Tag.upper, abort->Header.Tag.lower);
2607 as = (struct scsi_cmnd *) abort->scsi_cmd;
2608 if (as != NULL)
2609 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
2610 as->cmnd[0], as->serial_number);
2611 dev_dbg(&h->pdev->dev, "%s\n", msg);
2612 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
2613 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2614
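	/*
	 * If the command is still on the request queue it has not been
	 * sent to the hardware, so complete it as aborted right here.
	 */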
2619 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
2620 if (found) {
2621 found->err_info->CommandStatus = CMD_ABORTED;
2622 finish_cmd(found);
2623 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
2624 msg);
2625 return SUCCESS;
2626 }
2627
2628
2629 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2630 if (!found) {
2631 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
2632 msg);
2633 return SUCCESS;
2634 }
2635
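	/*
	 * The command is known to the hardware; ask the controller to
	 * abort it, trying both tag byte orders.
	 */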
2641 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
2642 if (rc != 0) {
2643 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
2644 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
2645 h->scsi_host->host_no,
2646 dev->bus, dev->target, dev->lun);
2647 return FAILED;
2648 }
2649 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
2650
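	/*
	 * The abort was accepted; wait for the aborted command to
	 * actually complete before reporting success.
	 */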
2656#define ABORT_COMPLETE_WAIT_SECS 30
2657 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
2658 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2659 if (!found)
2660 return SUCCESS;
2661 msleep(100);
2662 }
2663 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
2664 msg, ABORT_COMPLETE_WAIT_SECS);
2665 return FAILED;
2666}
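
/*
 * cmd_alloc: take a command from the preallocated pool, using a bitmap
 * under h->lock to track free entries; usable from contexts that cannot
 * sleep.  cmd_free() is the complement.
 */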
2675static struct CommandList *cmd_alloc(struct ctlr_info *h)
2676{
2677 struct CommandList *c;
2678 int i;
2679 union u64bit temp64;
2680 dma_addr_t cmd_dma_handle, err_dma_handle;
2681 unsigned long flags;
2682
2683 spin_lock_irqsave(&h->lock, flags);
2684 do {
2685 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2686 if (i == h->nr_cmds) {
2687 spin_unlock_irqrestore(&h->lock, flags);
2688 return NULL;
2689 }
2690 } while (test_and_set_bit
2691 (i & (BITS_PER_LONG - 1),
2692 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2693 h->nr_allocs++;
2694 spin_unlock_irqrestore(&h->lock, flags);
2695
2696 c = h->cmd_pool + i;
2697 memset(c, 0, sizeof(*c));
2698 cmd_dma_handle = h->cmd_pool_dhandle
2699 + i * sizeof(*c);
2700 c->err_info = h->errinfo_pool + i;
2701 memset(c->err_info, 0, sizeof(*c->err_info));
2702 err_dma_handle = h->errinfo_pool_dhandle
2703 + i * sizeof(*c->err_info);
2704
2705 c->cmdindex = i;
2706
2707 INIT_LIST_HEAD(&c->list);
2708 c->busaddr = (u32) cmd_dma_handle;
2709 temp64.val = (u64) err_dma_handle;
2710 c->ErrDesc.Addr.lower = temp64.val32.lower;
2711 c->ErrDesc.Addr.upper = temp64.val32.upper;
2712 c->ErrDesc.Len = sizeof(*c->err_info);
2713
2714 c->h = h;
2715 return c;
2716}
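
/*
 * cmd_special_alloc: allocate a command and its error-info buffer with
 * pci_alloc_consistent rather than from the pool; cmd_special_free()
 * is the complement.
 */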
2722static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2723{
2724 struct CommandList *c;
2725 union u64bit temp64;
2726 dma_addr_t cmd_dma_handle, err_dma_handle;
2727
2728 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2729 if (c == NULL)
2730 return NULL;
2731 memset(c, 0, sizeof(*c));
2732
2733 c->cmdindex = -1;
2734
2735 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2736 &err_dma_handle);
2737
2738 if (c->err_info == NULL) {
2739 pci_free_consistent(h->pdev,
2740 sizeof(*c), c, cmd_dma_handle);
2741 return NULL;
2742 }
2743 memset(c->err_info, 0, sizeof(*c->err_info));
2744
2745 INIT_LIST_HEAD(&c->list);
2746 c->busaddr = (u32) cmd_dma_handle;
2747 temp64.val = (u64) err_dma_handle;
2748 c->ErrDesc.Addr.lower = temp64.val32.lower;
2749 c->ErrDesc.Addr.upper = temp64.val32.upper;
2750 c->ErrDesc.Len = sizeof(*c->err_info);
2751
2752 c->h = h;
2753 return c;
2754}
2755
2756static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2757{
2758 int i;
2759 unsigned long flags;
2760
2761 i = c - h->cmd_pool;
2762 spin_lock_irqsave(&h->lock, flags);
2763 clear_bit(i & (BITS_PER_LONG - 1),
2764 h->cmd_pool_bits + (i / BITS_PER_LONG));
2765 h->nr_frees++;
2766 spin_unlock_irqrestore(&h->lock, flags);
2767}
2768
2769static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2770{
2771 union u64bit temp64;
2772
2773 temp64.val32.lower = c->ErrDesc.Addr.lower;
2774 temp64.val32.upper = c->ErrDesc.Addr.upper;
2775 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2776 c->err_info, (dma_addr_t) temp64.val);
2777 pci_free_consistent(h->pdev, sizeof(*c),
2778 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2779}
2780
2781#ifdef CONFIG_COMPAT
2782
2783static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2784{
2785 IOCTL32_Command_struct __user *arg32 =
2786 (IOCTL32_Command_struct __user *) arg;
2787 IOCTL_Command_struct arg64;
2788 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2789 int err;
2790 u32 cp;
2791
2792 memset(&arg64, 0, sizeof(arg64));
2793 err = 0;
2794 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2795 sizeof(arg64.LUN_info));
2796 err |= copy_from_user(&arg64.Request, &arg32->Request,
2797 sizeof(arg64.Request));
2798 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2799 sizeof(arg64.error_info));
2800 err |= get_user(arg64.buf_size, &arg32->buf_size);
2801 err |= get_user(cp, &arg32->buf);
2802 arg64.buf = compat_ptr(cp);
2803 err |= copy_to_user(p, &arg64, sizeof(arg64));
2804
2805 if (err)
2806 return -EFAULT;
2807
2808 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2809 if (err)
2810 return err;
2811 err |= copy_in_user(&arg32->error_info, &p->error_info,
2812 sizeof(arg32->error_info));
2813 if (err)
2814 return -EFAULT;
2815 return err;
2816}
2817
2818static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2819 int cmd, void *arg)
2820{
2821 BIG_IOCTL32_Command_struct __user *arg32 =
2822 (BIG_IOCTL32_Command_struct __user *) arg;
2823 BIG_IOCTL_Command_struct arg64;
2824 BIG_IOCTL_Command_struct __user *p =
2825 compat_alloc_user_space(sizeof(arg64));
2826 int err;
2827 u32 cp;
2828
2829 memset(&arg64, 0, sizeof(arg64));
2830 err = 0;
2831 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2832 sizeof(arg64.LUN_info));
2833 err |= copy_from_user(&arg64.Request, &arg32->Request,
2834 sizeof(arg64.Request));
2835 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2836 sizeof(arg64.error_info));
2837 err |= get_user(arg64.buf_size, &arg32->buf_size);
2838 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2839 err |= get_user(cp, &arg32->buf);
2840 arg64.buf = compat_ptr(cp);
2841 err |= copy_to_user(p, &arg64, sizeof(arg64));
2842
2843 if (err)
2844 return -EFAULT;
2845
2846 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2847 if (err)
2848 return err;
2849 err |= copy_in_user(&arg32->error_info, &p->error_info,
2850 sizeof(arg32->error_info));
2851 if (err)
2852 return -EFAULT;
2853 return err;
2854}
2855
2856static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2857{
2858 switch (cmd) {
2859 case CCISS_GETPCIINFO:
2860 case CCISS_GETINTINFO:
2861 case CCISS_SETINTINFO:
2862 case CCISS_GETNODENAME:
2863 case CCISS_SETNODENAME:
2864 case CCISS_GETHEARTBEAT:
2865 case CCISS_GETBUSTYPES:
2866 case CCISS_GETFIRMVER:
2867 case CCISS_GETDRIVVER:
2868 case CCISS_REVALIDVOLS:
2869 case CCISS_DEREGDISK:
2870 case CCISS_REGNEWDISK:
2871 case CCISS_REGNEWD:
2872 case CCISS_RESCANDISK:
2873 case CCISS_GETLUNINFO:
2874 return hpsa_ioctl(dev, cmd, arg);
2875
2876 case CCISS_PASSTHRU32:
2877 return hpsa_ioctl32_passthru(dev, cmd, arg);
2878 case CCISS_BIG_PASSTHRU32:
2879 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2880
2881 default:
2882 return -ENOIOCTLCMD;
2883 }
2884}
2885#endif
2886
2887static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2888{
2889 struct hpsa_pci_info pciinfo;
2890
2891 if (!argp)
2892 return -EINVAL;
2893 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2894 pciinfo.bus = h->pdev->bus->number;
2895 pciinfo.dev_fn = h->pdev->devfn;
2896 pciinfo.board_id = h->board_id;
2897 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2898 return -EFAULT;
2899 return 0;
2900}
2901
2902static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2903{
2904 DriverVer_type DriverVer;
2905 unsigned char vmaj, vmin, vsubmin;
2906 int rc;
2907
2908 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2909 &vmaj, &vmin, &vsubmin);
2910 if (rc != 3) {
2911 dev_info(&h->pdev->dev, "driver version string '%s' "
2912 "unrecognized.", HPSA_DRIVER_VERSION);
2913 vmaj = 0;
2914 vmin = 0;
2915 vsubmin = 0;
2916 }
2917 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2918 if (!argp)
2919 return -EINVAL;
2920 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2921 return -EFAULT;
2922 return 0;
2923}
2924
2925static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2926{
2927 IOCTL_Command_struct iocommand;
2928 struct CommandList *c;
2929 char *buff = NULL;
2930 union u64bit temp64;
2931
2932 if (!argp)
2933 return -EINVAL;
2934 if (!capable(CAP_SYS_RAWIO))
2935 return -EPERM;
2936 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2937 return -EFAULT;
2938 if ((iocommand.buf_size < 1) &&
2939 (iocommand.Request.Type.Direction != XFER_NONE)) {
2940 return -EINVAL;
2941 }
2942 if (iocommand.buf_size > 0) {
2943 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
2946 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2947
2948 if (copy_from_user(buff, iocommand.buf,
2949 iocommand.buf_size)) {
2950 kfree(buff);
2951 return -EFAULT;
2952 }
2953 } else {
2954 memset(buff, 0, iocommand.buf_size);
2955 }
2956 }
2957 c = cmd_special_alloc(h);
2958 if (c == NULL) {
2959 kfree(buff);
2960 return -ENOMEM;
2961 }
2962
2963 c->cmd_type = CMD_IOCTL_PEND;
2964
2965 c->Header.ReplyQueue = 0;
2966 if (iocommand.buf_size > 0) {
2967 c->Header.SGList = 1;
2968 c->Header.SGTotal = 1;
2969 } else {
2970 c->Header.SGList = 0;
2971 c->Header.SGTotal = 0;
2972 }
2973 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2974
2975 c->Header.Tag.lower = c->busaddr;
2976
2977
2978 memcpy(&c->Request, &iocommand.Request,
2979 sizeof(c->Request));
2980
2981
2982 if (iocommand.buf_size > 0) {
2983 temp64.val = pci_map_single(h->pdev, buff,
2984 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2985 c->SG[0].Addr.lower = temp64.val32.lower;
2986 c->SG[0].Addr.upper = temp64.val32.upper;
2987 c->SG[0].Len = iocommand.buf_size;
2988 c->SG[0].Ext = 0;
2989 }
2990 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2991 if (iocommand.buf_size > 0)
2992 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2993 check_ioctl_unit_attention(h, c);
2994
2995
2996 memcpy(&iocommand.error_info, c->err_info,
2997 sizeof(iocommand.error_info));
2998 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2999 kfree(buff);
3000 cmd_special_free(h, c);
3001 return -EFAULT;
3002 }
3003 if (iocommand.Request.Type.Direction == XFER_READ &&
3004 iocommand.buf_size > 0) {
3005
3006 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
3007 kfree(buff);
3008 cmd_special_free(h, c);
3009 return -EFAULT;
3010 }
3011 }
3012 kfree(buff);
3013 cmd_special_free(h, c);
3014 return 0;
3015}
3016
3017static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
3018{
3019 BIG_IOCTL_Command_struct *ioc;
3020 struct CommandList *c;
3021 unsigned char **buff = NULL;
3022 int *buff_size = NULL;
3023 union u64bit temp64;
3024 BYTE sg_used = 0;
3025 int status = 0;
3026 int i;
3027 u32 left;
3028 u32 sz;
3029 BYTE __user *data_ptr;
3030
3031 if (!argp)
3032 return -EINVAL;
3033 if (!capable(CAP_SYS_RAWIO))
3034 return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
3037 if (!ioc) {
3038 status = -ENOMEM;
3039 goto cleanup1;
3040 }
3041 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
3042 status = -EFAULT;
3043 goto cleanup1;
3044 }
3045 if ((ioc->buf_size < 1) &&
3046 (ioc->Request.Type.Direction != XFER_NONE)) {
3047 status = -EINVAL;
3048 goto cleanup1;
3049 }
3050
3051 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
3052 status = -EINVAL;
3053 goto cleanup1;
3054 }
3055 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
3056 status = -EINVAL;
3057 goto cleanup1;
3058 }
3059 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
3060 if (!buff) {
3061 status = -ENOMEM;
3062 goto cleanup1;
3063 }
3064 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
3065 if (!buff_size) {
3066 status = -ENOMEM;
3067 goto cleanup1;
3068 }
3069 left = ioc->buf_size;
3070 data_ptr = ioc->buf;
3071 while (left) {
3072 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
3073 buff_size[sg_used] = sz;
3074 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
3075 if (buff[sg_used] == NULL) {
3076 status = -ENOMEM;
3077 goto cleanup1;
3078 }
3079 if (ioc->Request.Type.Direction == XFER_WRITE) {
3080 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
3082 goto cleanup1;
3083 }
3084 } else
3085 memset(buff[sg_used], 0, sz);
3086 left -= sz;
3087 data_ptr += sz;
3088 sg_used++;
3089 }
3090 c = cmd_special_alloc(h);
3091 if (c == NULL) {
3092 status = -ENOMEM;
3093 goto cleanup1;
3094 }
3095 c->cmd_type = CMD_IOCTL_PEND;
3096 c->Header.ReplyQueue = 0;
3097 c->Header.SGList = c->Header.SGTotal = sg_used;
3098 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
3099 c->Header.Tag.lower = c->busaddr;
3100 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
3101 if (ioc->buf_size > 0) {
3103 for (i = 0; i < sg_used; i++) {
3104 temp64.val = pci_map_single(h->pdev, buff[i],
3105 buff_size[i], PCI_DMA_BIDIRECTIONAL);
3106 c->SG[i].Addr.lower = temp64.val32.lower;
3107 c->SG[i].Addr.upper = temp64.val32.upper;
3108 c->SG[i].Len = buff_size[i];
3109
3110 c->SG[i].Ext = 0;
3111 }
3112 }
3113 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3114 if (sg_used)
3115 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
3116 check_ioctl_unit_attention(h, c);
3117
3118 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
3119 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
3120 cmd_special_free(h, c);
3121 status = -EFAULT;
3122 goto cleanup1;
3123 }
3124 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
3125
3126 BYTE __user *ptr = ioc->buf;
3127 for (i = 0; i < sg_used; i++) {
3128 if (copy_to_user(ptr, buff[i], buff_size[i])) {
3129 cmd_special_free(h, c);
3130 status = -EFAULT;
3131 goto cleanup1;
3132 }
3133 ptr += buff_size[i];
3134 }
3135 }
3136 cmd_special_free(h, c);
3137 status = 0;
3138cleanup1:
3139 if (buff) {
3140 for (i = 0; i < sg_used; i++)
3141 kfree(buff[i]);
3142 kfree(buff);
3143 }
3144 kfree(buff_size);
3145 kfree(ioc);
3146 return status;
3147}
3148
3149static void check_ioctl_unit_attention(struct ctlr_info *h,
3150 struct CommandList *c)
3151{
3152 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
3153 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
3154 (void) check_for_unit_attention(h, c);
3155}
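
/* Dispatch the CCISS-compatible ioctls. */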
3159static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
3160{
3161 struct ctlr_info *h;
3162 void __user *argp = (void __user *)arg;
3163
3164 h = sdev_to_hba(dev);
3165
3166 switch (cmd) {
3167 case CCISS_DEREGDISK:
3168 case CCISS_REGNEWDISK:
3169 case CCISS_REGNEWD:
3170 hpsa_scan_start(h->scsi_host);
3171 return 0;
3172 case CCISS_GETPCIINFO:
3173 return hpsa_getpciinfo_ioctl(h, argp);
3174 case CCISS_GETDRIVVER:
3175 return hpsa_getdrivver_ioctl(h, argp);
3176 case CCISS_PASSTHRU:
3177 return hpsa_passthru_ioctl(h, argp);
3178 case CCISS_BIG_PASSTHRU:
3179 return hpsa_big_passthru_ioctl(h, argp);
3180 default:
3181 return -ENOTTY;
3182 }
3183}
3184
3185static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
3186 unsigned char *scsi3addr, u8 reset_type)
3187{
3188 struct CommandList *c;
3189
3190 c = cmd_alloc(h);
3191 if (!c)
3192 return -ENOMEM;
3193 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
3194 RAID_CTLR_LUNID, TYPE_MSG);
3195 c->Request.CDB[1] = reset_type;
3196 c->waiting = NULL;
3197 enqueue_cmd_and_start_io(h, c);
3198
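	/*
	 * Don't wait for completion: the reset keeps the command from
	 * completing normally, and the block is never freed either.
	 */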
3202 return 0;
3203}
3204
3205static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3206 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
3207 int cmd_type)
3208{
	int pci_dir = PCI_DMA_NONE;
3210 struct CommandList *a;
3211
3212 c->cmd_type = CMD_IOCTL_PEND;
3213 c->Header.ReplyQueue = 0;
3214 if (buff != NULL && size > 0) {
3215 c->Header.SGList = 1;
3216 c->Header.SGTotal = 1;
3217 } else {
3218 c->Header.SGList = 0;
3219 c->Header.SGTotal = 0;
3220 }
3221 c->Header.Tag.lower = c->busaddr;
3222 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
3223
3224 c->Request.Type.Type = cmd_type;
3225 if (cmd_type == TYPE_CMD) {
3226 switch (cmd) {
3227 case HPSA_INQUIRY:
3228
3229 if (page_code != 0) {
3230 c->Request.CDB[1] = 0x01;
3231 c->Request.CDB[2] = page_code;
3232 }
3233 c->Request.CDBLen = 6;
3234 c->Request.Type.Attribute = ATTR_SIMPLE;
3235 c->Request.Type.Direction = XFER_READ;
3236 c->Request.Timeout = 0;
3237 c->Request.CDB[0] = HPSA_INQUIRY;
3238 c->Request.CDB[4] = size & 0xFF;
3239 break;
3240 case HPSA_REPORT_LOG:
3241 case HPSA_REPORT_PHYS:
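			/*
			 * Report LUNs: the allocation length goes in CDB
			 * bytes 6-9, big-endian.
			 */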
3245 c->Request.CDBLen = 12;
3246 c->Request.Type.Attribute = ATTR_SIMPLE;
3247 c->Request.Type.Direction = XFER_READ;
3248 c->Request.Timeout = 0;
3249 c->Request.CDB[0] = cmd;
3250 c->Request.CDB[6] = (size >> 24) & 0xFF;
3251 c->Request.CDB[7] = (size >> 16) & 0xFF;
3252 c->Request.CDB[8] = (size >> 8) & 0xFF;
3253 c->Request.CDB[9] = size & 0xFF;
3254 break;
3255 case HPSA_CACHE_FLUSH:
3256 c->Request.CDBLen = 12;
3257 c->Request.Type.Attribute = ATTR_SIMPLE;
3258 c->Request.Type.Direction = XFER_WRITE;
3259 c->Request.Timeout = 0;
3260 c->Request.CDB[0] = BMIC_WRITE;
3261 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
3262 c->Request.CDB[7] = (size >> 8) & 0xFF;
3263 c->Request.CDB[8] = size & 0xFF;
3264 break;
3265 case TEST_UNIT_READY:
3266 c->Request.CDBLen = 6;
3267 c->Request.Type.Attribute = ATTR_SIMPLE;
3268 c->Request.Type.Direction = XFER_NONE;
3269 c->Request.Timeout = 0;
3270 break;
3271 default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
3273 BUG();
3274 return;
3275 }
3276 } else if (cmd_type == TYPE_MSG) {
3277 switch (cmd) {
3278
3279 case HPSA_DEVICE_RESET_MSG:
3280 c->Request.CDBLen = 16;
			c->Request.Type.Type = TYPE_MSG;
3282 c->Request.Type.Attribute = ATTR_SIMPLE;
3283 c->Request.Type.Direction = XFER_NONE;
3284 c->Request.Timeout = 0;
3285 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
3286 c->Request.CDB[0] = cmd;
3287 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
3288
3289
3290 c->Request.CDB[4] = 0x00;
3291 c->Request.CDB[5] = 0x00;
3292 c->Request.CDB[6] = 0x00;
3293 c->Request.CDB[7] = 0x00;
3294 break;
3295 case HPSA_ABORT_MSG:
3296 a = buff;
3297 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
3298 a->Header.Tag.upper, a->Header.Tag.lower,
3299 c->Header.Tag.upper, c->Header.Tag.lower);
3300 c->Request.CDBLen = 16;
3301 c->Request.Type.Type = TYPE_MSG;
3302 c->Request.Type.Attribute = ATTR_SIMPLE;
3303 c->Request.Type.Direction = XFER_WRITE;
3304 c->Request.Timeout = 0;
3305 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
3306 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
3307 c->Request.CDB[2] = 0x00;
3308 c->Request.CDB[3] = 0x00;
3309
3310 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
3311 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
3312 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
3313 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
3314 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
3315 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
3316 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
3317 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
3318 c->Request.CDB[12] = 0x00;
3319 c->Request.CDB[13] = 0x00;
3320 c->Request.CDB[14] = 0x00;
3321 c->Request.CDB[15] = 0x00;
3322 break;
3323 default:
3324 dev_warn(&h->pdev->dev, "unknown message type %d\n",
3325 cmd);
3326 BUG();
3327 }
3328 } else {
3329 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
3330 BUG();
3331 }
3332
3333 switch (c->Request.Type.Direction) {
3334 case XFER_READ:
3335 pci_dir = PCI_DMA_FROMDEVICE;
3336 break;
3337 case XFER_WRITE:
3338 pci_dir = PCI_DMA_TODEVICE;
3339 break;
3340 case XFER_NONE:
3341 pci_dir = PCI_DMA_NONE;
3342 break;
3343 default:
3344 pci_dir = PCI_DMA_BIDIRECTIONAL;
3345 }
3346
3347 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
3348
3349 return;
3350}
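
/* Map a physical PCI memory range into kernel virtual address space. */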
3355static void __iomem *remap_pci_mem(ulong base, ulong size)
3356{
3357 ulong page_base = ((ulong) base) & PAGE_MASK;
3358 ulong page_offs = ((ulong) base) - page_base;
3359 void __iomem *page_remapped = ioremap_nocache(page_base,
3360 page_offs + size);
3361
3362 return page_remapped ? (page_remapped + page_offs) : NULL;
3363}
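
/*
 * start_io: move commands from the request queue to the hardware,
 * keeping them on the completion queue until they finish.
 */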
3368static void start_io(struct ctlr_info *h)
3369{
3370 struct CommandList *c;
3371 unsigned long flags;
3372
3373 spin_lock_irqsave(&h->lock, flags);
3374 while (!list_empty(&h->reqQ)) {
3375 c = list_entry(h->reqQ.next, struct CommandList, list);
3376
3377 if ((h->access.fifo_full(h))) {
3378 dev_warn(&h->pdev->dev, "fifo full\n");
3379 break;
3380 }
3381
3382
3383 removeQ(c);
3384 h->Qdepth--;
3385
3386
3387 addQ(&h->cmpQ, c);
3388
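		/*
		 * Count the command as outstanding before unlocking and
		 * submitting, to avoid racing the fifo-full check above.
		 */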
3393 h->commands_outstanding++;
3394 if (h->commands_outstanding > h->max_outstanding)
3395 h->max_outstanding = h->commands_outstanding;
3396
3397
3398 spin_unlock_irqrestore(&h->lock, flags);
3399 h->access.submit_command(h, c);
3400 spin_lock_irqsave(&h->lock, flags);
3401 }
3402 spin_unlock_irqrestore(&h->lock, flags);
3403}
3404
3405static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
3406{
3407 return h->access.command_completed(h, q);
3408}
3409
3410static inline bool interrupt_pending(struct ctlr_info *h)
3411{
3412 return h->access.intr_pending(h);
3413}
3414
3415static inline long interrupt_not_for_us(struct ctlr_info *h)
3416{
3417 return (h->access.intr_pending(h) == 0) ||
3418 (h->interrupts_enabled == 0);
3419}
3420
3421static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3422 u32 raw_tag)
3423{
3424 if (unlikely(tag_index >= h->nr_cmds)) {
3425 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3426 return 1;
3427 }
3428 return 0;
3429}
3430
3431static inline void finish_cmd(struct CommandList *c)
3432{
3433 unsigned long flags;
3434
3435 spin_lock_irqsave(&c->h->lock, flags);
3436 removeQ(c);
3437 spin_unlock_irqrestore(&c->h->lock, flags);
3438 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
3439 if (likely(c->cmd_type == CMD_SCSI))
3440 complete_scsi_command(c);
3441 else if (c->cmd_type == CMD_IOCTL_PEND)
3442 complete(c->waiting);
3443}
3444
3445static inline u32 hpsa_tag_contains_index(u32 tag)
3446{
3447 return tag & DIRECT_LOOKUP_BIT;
3448}
3449
3450static inline u32 hpsa_tag_to_index(u32 tag)
3451{
3452 return tag >> DIRECT_LOOKUP_SHIFT;
3453}
3454
3455
3456static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3457{
3458#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3459#define HPSA_SIMPLE_ERROR_BITS 0x03
3460 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3461 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3462 return tag & ~HPSA_PERF_ERROR_BITS;
3463}
3464
3465
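/* Complete a command whose tag directly encodes its index in the pool. */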
3466static inline void process_indexed_cmd(struct ctlr_info *h,
3467 u32 raw_tag)
3468{
3469 u32 tag_index;
3470 struct CommandList *c;
3471
3472 tag_index = hpsa_tag_to_index(raw_tag);
3473 if (!bad_tag(h, tag_index, raw_tag)) {
3474 c = h->cmd_pool + tag_index;
3475 finish_cmd(c);
3476 }
3477}
3478
3479
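/*
 * Complete a command by matching its bus address against the entries
 * on the completion queue.
 */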
3480static inline void process_nonindexed_cmd(struct ctlr_info *h,
3481 u32 raw_tag)
3482{
3483 u32 tag;
3484 struct CommandList *c = NULL;
3485 unsigned long flags;
3486
3487 tag = hpsa_tag_discard_error_bits(h, raw_tag);
3488 spin_lock_irqsave(&h->lock, flags);
3489 list_for_each_entry(c, &h->cmpQ, list) {
3490 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3491 spin_unlock_irqrestore(&h->lock, flags);
3492 finish_cmd(c);
3493 return;
3494 }
3495 }
3496 spin_unlock_irqrestore(&h->lock, flags);
3497 bad_tag(h, h->nr_cmds + 1, raw_tag);
3498}
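
/*
 * Some controllers deliver one interrupt after a reset even with
 * interrupts disabled (a known firmware issue); during kdump
 * (reset_devices) such interrupts are simply ignored.
 */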
3505static int ignore_bogus_interrupt(struct ctlr_info *h)
3506{
3507 if (likely(!reset_devices))
3508 return 0;
3509
3510 if (likely(h->interrupts_enabled))
3511 return 0;
3512
3513 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3514 "(known firmware bug.) Ignoring.\n");
3515
3516 return 1;
3517}
3518
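/*
 * Recover the ctlr_info from a per-queue u8 cookie: each element of
 * h->q[] holds its own index, so subtracting the stored index from the
 * pointer yields &h->q[0], which container_of maps back to the hba.
 */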
3524static struct ctlr_info *queue_to_hba(u8 *queue)
3525{
3526 return container_of((queue - *queue), struct ctlr_info, q[0]);
3527}
3528
3529static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
3530{
3531 struct ctlr_info *h = queue_to_hba(queue);
3532 u8 q = *(u8 *) queue;
3533 u32 raw_tag;
3534
3535 if (ignore_bogus_interrupt(h))
3536 return IRQ_NONE;
3537
3538 if (interrupt_not_for_us(h))
3539 return IRQ_NONE;
3540 h->last_intr_timestamp = get_jiffies_64();
3541 while (interrupt_pending(h)) {
3542 raw_tag = get_next_completion(h, q);
3543 while (raw_tag != FIFO_EMPTY)
3544 raw_tag = next_command(h, q);
3545 }
3546 return IRQ_HANDLED;
3547}
3548
3549static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
3550{
3551 struct ctlr_info *h = queue_to_hba(queue);
3552 u32 raw_tag;
3553 u8 q = *(u8 *) queue;
3554
3555 if (ignore_bogus_interrupt(h))
3556 return IRQ_NONE;
3557
3558 h->last_intr_timestamp = get_jiffies_64();
3559 raw_tag = get_next_completion(h, q);
3560 while (raw_tag != FIFO_EMPTY)
3561 raw_tag = next_command(h, q);
3562 return IRQ_HANDLED;
3563}
3564
3565static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
3566{
3567 struct ctlr_info *h = queue_to_hba((u8 *) queue);
3568 u32 raw_tag;
3569 u8 q = *(u8 *) queue;
3570
3571 if (interrupt_not_for_us(h))
3572 return IRQ_NONE;
3573 h->last_intr_timestamp = get_jiffies_64();
3574 while (interrupt_pending(h)) {
3575 raw_tag = get_next_completion(h, q);
3576 while (raw_tag != FIFO_EMPTY) {
3577 if (likely(hpsa_tag_contains_index(raw_tag)))
3578 process_indexed_cmd(h, raw_tag);
3579 else
3580 process_nonindexed_cmd(h, raw_tag);
3581 raw_tag = next_command(h, q);
3582 }
3583 }
3584 return IRQ_HANDLED;
3585}
3586
3587static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
3588{
3589 struct ctlr_info *h = queue_to_hba(queue);
3590 u32 raw_tag;
3591 u8 q = *(u8 *) queue;
3592
3593 h->last_intr_timestamp = get_jiffies_64();
3594 raw_tag = get_next_completion(h, q);
3595 while (raw_tag != FIFO_EMPTY) {
3596 if (likely(hpsa_tag_contains_index(raw_tag)))
3597 process_indexed_cmd(h, raw_tag);
3598 else
3599 process_nonindexed_cmd(h, raw_tag);
3600 raw_tag = next_command(h, q);
3601 }
3602 return IRQ_HANDLED;
3603}
3604
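/*
 * hpsa_message: send a message-type CDB (e.g. a no-op) straight to the
 * controller through the simple-mode request/reply ports, polling for
 * the tag to come back.  Only used around controller resets.
 */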
3609static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3610 unsigned char type)
3611{
3612 struct Command {
3613 struct CommandListHeader CommandHeader;
3614 struct RequestBlock Request;
3615 struct ErrDescriptor ErrorDescriptor;
3616 };
3617 struct Command *cmd;
3618 static const size_t cmd_sz = sizeof(*cmd) +
3619 sizeof(cmd->ErrorDescriptor);
3620 dma_addr_t paddr64;
3621 uint32_t paddr32, tag;
3622 void __iomem *vaddr;
3623 int i, err;
3624
3625 vaddr = pci_ioremap_bar(pdev, 0);
3626 if (vaddr == NULL)
3627 return -ENOMEM;
3628
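	/*
	 * The message interface works with 32-bit command addresses, so
	 * restrict consistent allocations to the low 4GB.
	 */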
3633 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3634 if (err) {
3635 iounmap(vaddr);
3636 return -ENOMEM;
3637 }
3638
3639 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3640 if (cmd == NULL) {
3641 iounmap(vaddr);
3642 return -ENOMEM;
3643 }
3644
3649 paddr32 = paddr64;
3650
3651 cmd->CommandHeader.ReplyQueue = 0;
3652 cmd->CommandHeader.SGList = 0;
3653 cmd->CommandHeader.SGTotal = 0;
3654 cmd->CommandHeader.Tag.lower = paddr32;
3655 cmd->CommandHeader.Tag.upper = 0;
3656 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3657
3658 cmd->Request.CDBLen = 16;
3659 cmd->Request.Type.Type = TYPE_MSG;
3660 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3661 cmd->Request.Type.Direction = XFER_NONE;
3662 cmd->Request.Timeout = 0;
3663 cmd->Request.CDB[0] = opcode;
3664 cmd->Request.CDB[1] = type;
3665 memset(&cmd->Request.CDB[2], 0, 14);
3666 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3667 cmd->ErrorDescriptor.Addr.upper = 0;
3668 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3669
3670 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3671
3672 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3673 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3674 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3675 break;
3676 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3677 }
3678
3679 iounmap(vaddr);
3680
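	/*
	 * On timeout the DMA buffer is deliberately leaked: the
	 * controller may still write to it, so freeing it is unsafe.
	 */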
3684 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3685 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3686 opcode, type);
3687 return -ETIMEDOUT;
3688 }
3689
3690 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3691
3692 if (tag & HPSA_ERROR_BIT) {
3693 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3694 opcode, type);
3695 return -EIO;
3696 }
3697
3698 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3699 opcode, type);
3700 return 0;
3701}
3702
3703#define hpsa_noop(p) hpsa_message(p, 3, 0)
3704
3705static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
3707{
3708 u16 pmcsr;
3709 int pos;
3710
3711 if (use_doorbell) {
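		/*
		 * Ask the firmware to reset the controller by writing the
		 * reset code to the doorbell register.
		 */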
3716 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3717 writel(use_doorbell, vaddr + SA5_DOORBELL);
3718 } else {
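		/*
		 * No doorbell reset available: bounce the board through
		 * PCI power states D3hot and back to D0 to reset it.
		 */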
3728 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3729 if (pos == 0) {
3730 dev_err(&pdev->dev,
3731 "hpsa_reset_controller: "
3732 "PCI PM not supported\n");
3733 return -ENODEV;
3734 }
3735 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3736
3737 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3738 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3739 pmcsr |= PCI_D3hot;
3740 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3741
3742 msleep(500);
3743
3744
3745 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3746 pmcsr |= PCI_D0;
3747 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3748
3754 msleep(500);
3755 }
3756 return 0;
3757}
3758
3759static __devinit void init_driver_version(char *driver_version, int len)
3760{
3761 memset(driver_version, 0, len);
3762 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
3763}
3764
3765static __devinit int write_driver_ver_to_cfgtable(
3766 struct CfgTable __iomem *cfgtable)
3767{
3768 char *driver_version;
3769 int i, size = sizeof(cfgtable->driver_version);
3770
3771 driver_version = kmalloc(size, GFP_KERNEL);
3772 if (!driver_version)
3773 return -ENOMEM;
3774
3775 init_driver_version(driver_version, size);
3776 for (i = 0; i < size; i++)
3777 writeb(driver_version[i], &cfgtable->driver_version[i]);
3778 kfree(driver_version);
3779 return 0;
3780}
3781
3782static __devinit void read_driver_ver_from_cfgtable(
3783 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3784{
3785 int i;
3786
3787 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3788 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3789}
3790
3791static __devinit int controller_reset_failed(
3792 struct CfgTable __iomem *cfgtable)
3793{
3794
3795 char *driver_ver, *old_driver_ver;
3796 int rc, size = sizeof(cfgtable->driver_version);
3797
3798 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3799 if (!old_driver_ver)
3800 return -ENOMEM;
3801 driver_ver = old_driver_ver + size;
3802
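	/*
	 * A successful reset clears the driver-version bytes previously
	 * written to the config table; if they still match what this
	 * driver writes, the reset did not really happen.
	 */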
3806 init_driver_version(old_driver_ver, size);
3807 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3808 rc = !memcmp(driver_ver, old_driver_ver, size);
3809 kfree(old_driver_ver);
3810 return rc;
3811}
3812
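/* Perform a hard reset of the controller, for the kdump case. */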
3815static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3816{
3817 u64 cfg_offset;
3818 u32 cfg_base_addr;
3819 u64 cfg_base_addr_index;
3820 void __iomem *vaddr;
3821 unsigned long paddr;
3822 u32 misc_fw_support;
3823 int rc;
3824 struct CfgTable __iomem *cfgtable;
3825 u32 use_doorbell;
3826 u32 board_id;
3827 u16 command_register;
3828
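	/*
	 * Only boards known to survive a reset are allowed through;
	 * bail out for anything else.
	 */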
3842 rc = hpsa_lookup_board_id(pdev, &board_id);
3843 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3844 dev_warn(&pdev->dev, "Not resetting device.\n");
3845 return -ENODEV;
3846 }
3847
3848
3849 if (!ctlr_is_hard_resettable(board_id))
3850 return -ENOTSUPP;
3851
3852
	pci_read_config_word(pdev, PCI_COMMAND, &command_register);
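
	/*
	 * Turn the board off now so that a later pci_restore_state()
	 * cannot wake it before the driver is ready.
	 */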
3857 pci_disable_device(pdev);
3858 pci_save_state(pdev);
3859
3860
3861 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3862 if (rc)
3863 return rc;
3864 vaddr = remap_pci_mem(paddr, 0x250);
3865 if (!vaddr)
3866 return -ENOMEM;
3867
3868
3869 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3870 &cfg_base_addr_index, &cfg_offset);
3871 if (rc)
3872 goto unmap_vaddr;
3873 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3874 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3875 if (!cfgtable) {
3876 rc = -ENOMEM;
3877 goto unmap_vaddr;
3878 }
3879 rc = write_driver_ver_to_cfgtable(cfgtable);
3880 if (rc)
3881 goto unmap_vaddr;
3882
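	/*
	 * Prefer a doorbell-based reset when the firmware advertises
	 * one, favoring the newer of the two doorbell methods.
	 */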
3886 misc_fw_support = readl(&cfgtable->misc_fw_support);
3887 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3888 if (use_doorbell) {
3889 use_doorbell = DOORBELL_CTLR_RESET2;
3890 } else {
3891 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3892 if (use_doorbell) {
3893 dev_warn(&pdev->dev, "Soft reset not supported. "
3894 "Firmware update is required.\n");
3895 rc = -ENOTSUPP;
3896 goto unmap_cfgtable;
3897 }
3898 }
3899
3900 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3901 if (rc)
3902 goto unmap_cfgtable;
3903
3904 pci_restore_state(pdev);
3905 rc = pci_enable_device(pdev);
3906 if (rc) {
3907 dev_warn(&pdev->dev, "failed to enable device.\n");
3908 goto unmap_cfgtable;
3909 }
	pci_write_config_word(pdev, PCI_COMMAND, command_register);
3911
3914 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3915
3916
3917 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3918 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3919 if (rc) {
3920 dev_warn(&pdev->dev,
3921 "failed waiting for board to reset."
3922 " Will try soft reset.\n");
3923 rc = -ENOTSUPP;
3924 goto unmap_cfgtable;
3925 }
3926 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3927 if (rc) {
3928 dev_warn(&pdev->dev,
3929 "failed waiting for board to become ready "
3930 "after hard reset\n");
3931 goto unmap_cfgtable;
3932 }
3933
	rc = controller_reset_failed(cfgtable);
3935 if (rc < 0)
3936 goto unmap_cfgtable;
3937 if (rc) {
3938 dev_warn(&pdev->dev, "Unable to successfully reset "
3939 "controller. Will try soft reset.\n");
3940 rc = -ENOTSUPP;
3941 } else {
3942 dev_info(&pdev->dev, "board ready after hard reset.\n");
3943 }
3944
3945unmap_cfgtable:
3946 iounmap(cfgtable);
3947
3948unmap_vaddr:
3949 iounmap(vaddr);
3950 return rc;
3951}
3952
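/* Dump the controller configuration table (debug builds only). */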
3958static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3959{
3960#ifdef HPSA_DEBUG
3961 int i;
3962 char temp_name[17];
3963
3964 dev_info(dev, "Controller Configuration information\n");
3965 dev_info(dev, "------------------------------------\n");
3966 for (i = 0; i < 4; i++)
3967 temp_name[i] = readb(&(tb->Signature[i]));
3968 temp_name[4] = '\0';
3969 dev_info(dev, " Signature = %s\n", temp_name);
3970 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3971 dev_info(dev, " Transport methods supported = 0x%x\n",
3972 readl(&(tb->TransportSupport)));
3973 dev_info(dev, " Transport methods active = 0x%x\n",
3974 readl(&(tb->TransportActive)));
3975 dev_info(dev, " Requested transport Method = 0x%x\n",
3976 readl(&(tb->HostWrite.TransportRequest)));
3977 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3978 readl(&(tb->HostWrite.CoalIntDelay)));
3979 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3980 readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
		readl(&(tb->CmdsOutMax)));
3983 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3984 for (i = 0; i < 16; i++)
3985 temp_name[i] = readb(&(tb->ServerName[i]));
3986 temp_name[16] = '\0';
3987 dev_info(dev, " Server Name = %s\n", temp_name);
3988 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3989 readl(&(tb->HeartBeat)));
3990#endif
3991}
3992
3993static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3994{
3995 int i, offset, mem_type, bar_type;
3996
3997 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
3998 return 0;
3999 offset = 0;
4000 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4001 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
4002 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
4003 offset += 4;
4004 else {
4005 mem_type = pci_resource_flags(pdev, i) &
4006 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
4007 switch (mem_type) {
4008 case PCI_BASE_ADDRESS_MEM_TYPE_32:
4009 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
4010 offset += 4;
4011 break;
4012 case PCI_BASE_ADDRESS_MEM_TYPE_64:
4013 offset += 8;
4014 break;
			default:
				dev_warn(&pdev->dev,
					"base address is invalid\n");
				return -1;
4020 }
4021 }
4022 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
4023 return i + 1;
4024 }
4025 return -1;
4026}
4027
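/*
 * Pick an interrupt mode: try MSI-X first, then MSI, and fall back to
 * the legacy INTx interrupt if neither can be enabled.
 */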
4032static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
4033{
4034#ifdef CONFIG_PCI_MSI
4035 int err, i;
4036 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
4037
4038 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
4039 hpsa_msix_entries[i].vector = 0;
4040 hpsa_msix_entries[i].entry = i;
4041 }
4042
4043
4044 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
4045 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
4046 goto default_int_mode;
4047 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
4048 dev_info(&h->pdev->dev, "MSIX\n");
4049 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
4050 MAX_REPLY_QUEUES);
4051 if (!err) {
4052 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4053 h->intr[i] = hpsa_msix_entries[i].vector;
4054 h->msix_vector = 1;
4055 return;
4056 }
4057 if (err > 0) {
4058 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
4059 "available\n", err);
4060 goto default_int_mode;
4061 } else {
4062 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
4063 err);
4064 goto default_int_mode;
4065 }
4066 }
4067 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
4068 dev_info(&h->pdev->dev, "MSI\n");
4069 if (!pci_enable_msi(h->pdev))
4070 h->msi_vector = 1;
4071 else
4072 dev_warn(&h->pdev->dev, "MSI init failed\n");
4073 }
4074default_int_mode:
4075#endif
4076
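	/* Fall back to the legacy pin-based interrupt. */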
4077 h->intr[h->intr_mode] = h->pdev->irq;
4078}
4079
4080static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
4081{
4082 int i;
4083 u32 subsystem_vendor_id, subsystem_device_id;
4084
4085 subsystem_vendor_id = pdev->subsystem_vendor;
4086 subsystem_device_id = pdev->subsystem_device;
4087 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
4088 subsystem_vendor_id;
4089
4090 for (i = 0; i < ARRAY_SIZE(products); i++)
4091 if (*board_id == products[i].board_id)
4092 return i;
4093
4094 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
4095 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
4096 !hpsa_allow_any) {
4097 dev_warn(&pdev->dev, "unrecognized board ID: "
4098 "0x%08x, ignoring.\n", *board_id);
4099 return -ENODEV;
4100 }
4101 return ARRAY_SIZE(products) - 1;
4102}
4103
4104static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
4105 unsigned long *memory_bar)
4106{
4107 int i;
4108
4109 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
4110 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
4111
4112 *memory_bar = pci_resource_start(pdev, i);
4113 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
4114 *memory_bar);
4115 return 0;
4116 }
4117 dev_warn(&pdev->dev, "no memory BAR found\n");
4118 return -ENODEV;
4119}
4120
4121static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
4122 void __iomem *vaddr, int wait_for_ready)
4123{
4124 int i, iterations;
4125 u32 scratchpad;
4126 if (wait_for_ready)
4127 iterations = HPSA_BOARD_READY_ITERATIONS;
4128 else
4129 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
4130
4131 for (i = 0; i < iterations; i++) {
4132 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
4133 if (wait_for_ready) {
4134 if (scratchpad == HPSA_FIRMWARE_READY)
4135 return 0;
4136 } else {
4137 if (scratchpad != HPSA_FIRMWARE_READY)
4138 return 0;
4139 }
4140 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
4141 }
4142 dev_warn(&pdev->dev, "board not ready, timed out.\n");
4143 return -ENODEV;
4144}
4145
4146static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
4147 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
4148 u64 *cfg_offset)
4149{
4150 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
4151 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
4152 *cfg_base_addr &= (u32) 0x0000ffff;
4153 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
4154 if (*cfg_base_addr_index == -1) {
4155 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
4156 return -ENODEV;
4157 }
4158 return 0;
4159}
4160
4161static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
4162{
4163 u64 cfg_offset;
4164 u32 cfg_base_addr;
4165 u64 cfg_base_addr_index;
4166 u32 trans_offset;
4167 int rc;
4168
4169 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
4170 &cfg_base_addr_index, &cfg_offset);
4171 if (rc)
4172 return rc;
4173 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4174 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4175 if (!h->cfgtable)
4176 return -ENOMEM;
4177 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4178 if (rc)
4179 return rc;
4180
4181 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4182 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
4183 cfg_base_addr_index)+cfg_offset+trans_offset,
4184 sizeof(*h->transtable));
4185 if (!h->transtable)
4186 return -ENOMEM;
4187 return 0;
4188}
4189
4190static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
4191{
4192 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4193
4194
4195 if (reset_devices && h->max_commands > 32)
4196 h->max_commands = 32;
4197
4198 if (h->max_commands < 16) {
4199 dev_warn(&h->pdev->dev, "Controller reports "
4200 "max supported commands of %d, an obvious lie. "
4201 "Using 16. Ensure that firmware is up to date.\n",
4202 h->max_commands);
4203 h->max_commands = 16;
4204 }
4205}
4206
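/*
 * Interrogate the board for its command-count and scatter/gather
 * limits, and size the driver's queues accordingly.
 */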
4211static void __devinit hpsa_find_board_params(struct ctlr_info *h)
4212{
4213 hpsa_get_max_perf_mode_cmds(h);
4214 h->nr_cmds = h->max_commands - 4;
4215 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
4216
4220 h->max_cmd_sg_entries = 31;
4221 if (h->maxsgentries > 512) {
4222 h->max_cmd_sg_entries = 32;
4223 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
4224 h->maxsgentries--;
4225 } else {
4226 h->maxsgentries = 31;
4227 h->chainsize = 0;
4228 }
4229
4230
4231 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
4232}
4233
4234static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
4235{
4236 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
4237 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
4238 return false;
4239 }
4240 return true;
4241}
4242
4243
4244static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
4245{
4246#ifdef CONFIG_X86
4247 u32 prefetch;
4248
4249 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
4250 prefetch |= 0x100;
4251 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
4252#endif
4253}
4254
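/*
 * Quirk for the P600 (board id 0x3225103C): adjust its I2O DMA
 * prefetch configuration, which reportedly misbehaves otherwise.
 */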
4258static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
4259{
4260 u32 dma_prefetch;
4261
4262 if (h->board_id != 0x3225103C)
4263 return;
4264 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
4265 dma_prefetch |= 0x8000;
4266 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
4267}
4268
4269static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
4270{
4271 int i;
4272 u32 doorbell_value;
4273 unsigned long flags;
4274
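	/*
	 * Poll the doorbell until the controller acknowledges the
	 * transport-mode change; this can occasionally take a while.
	 */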
4279 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
4280 spin_lock_irqsave(&h->lock, flags);
4281 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
4282 spin_unlock_irqrestore(&h->lock, flags);
4283 if (!(doorbell_value & CFGTBL_ChangeReq))
4284 break;
4285
4286 usleep_range(10000, 20000);
4287 }
4288}
4289
4290static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
4291{
4292 u32 trans_support;
4293
4294 trans_support = readl(&(h->cfgtable->TransportSupport));
4295 if (!(trans_support & SIMPLE_MODE))
4296 return -ENOTSUPP;
4297
4298 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
4299
4300 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
4301 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4302 hpsa_wait_for_mode_change_ack(h);
4303 print_cfg_table(&h->pdev->dev, h->cfgtable);
4304 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
4305 dev_warn(&h->pdev->dev,
4306 "unable to get board into simple mode\n");
4307 return -ENODEV;
4308 }
4309 h->transMethod = CFGTBL_Trans_Simple;
4310 return 0;
4311}

static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x boards.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->cmd_pool),
		&(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->errinfo_pool),
		&(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
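
/*
 * Pool layout: each of the h->nr_cmds command slots pairs one
 * CommandList in cmd_pool with one ErrorInfo in errinfo_pool, both in
 * DMA-coherent memory, plus one allocation bit in cmd_pool_bits.  A
 * partial allocation failure here is safe: callers unwind through
 * hpsa_free_cmd_pool(), which tolerates NULL pool pointers.
 */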

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct CommandList),
			h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct ErrorInfo),
			h->errinfo_pool,
			h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * Initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < MAX_REPLY_QUEUES; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
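
/*
 * The &h->q[i] cookie handed to request_irq() comes back to the
 * handler as its dev_id argument, which is how one msixhandler
 * services all MAX_REPLY_QUEUES vectors: it recovers the queue index
 * from the cookie.  Note that the MSI-X loop above only checks the
 * return value of the last request_irq() call, so an earlier
 * per-vector failure would go unreported.
 */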

static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}
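
/*
 * The two-phase wait above is deliberate: observing BOARD_NOT_READY
 * first proves the reset actually took effect (a board that never
 * leaves READY was not reset), and only then does the code wait for
 * the board to come back to READY.
 */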

static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		free_irq(h->intr[i], &h->q[i]);
}

static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal.  We
	 * otherwise don't care about signals, but do need to
	 * be woken if kthread_stop() is called.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
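
/*
 * Putting the checks above together: a controller is declared locked
 * up only if no interrupt has arrived for a full sample interval, a
 * full interval has also passed since the previous heartbeat sample,
 * and the firmware's HeartBeat counter has not advanced between the
 * two samples.
 */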

static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}

static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, HPSA);
		/* kthread_run() returns an ERR_PTR, not NULL, on failure */
		if (IS_ERR(hpsa_lockup_detector))
			hpsa_lockup_detector = NULL;
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}

static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlrs to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		kthread_stop(hpsa_lockup_detector);
		spin_lock_irqsave(&lockup_detector_lock, flags);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* Since the controller's been reset, we have to go back and
		 * re-init everything.  Easiest to just forget what we've done
		 * and do it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;	/* probe convention: 0 on success */

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Write all data in the battery backed cache to disks
	 * before power down.  Some units might need a very short
	 * delay before turning off the power.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}

static void __devexit hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;	/* PM suspend/resume not implemented */
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
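
/*
 * Worked example against the bft[] table used below, {5, 6, 8, 10,
 * 12, 20, 28, SG_ENTRIES_IN_CMD + 4}: a command with 10 SG entries
 * needs 10 + MINIMUM_TRANSFER_BLOCKS = 14 blocks, the first bucket
 * holding at least 14 is bucket[5] = 20, so bucket_map[10] = 5 and
 * the controller fetches 20 * 16 = 320 bytes for that command.
 */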

static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the
	 * controller which of the eight sizes that command fits within.
	 * The size of each command depends on how many scatter gather
	 * entries there are.  Each SG entry requires 16 bytes.  The
	 * eight sizes that the controller supports are programmed into
	 * bft[], e.g.:
	 *  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(CFGTBL_Trans_Performant | use_short_tags |
		CFGTBL_Trans_enable_directed_msix,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}

static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 *  This is it.  Register the PCI driver information for the cards we
 *  control.  The OS will call our registered routines when it finds
 *  one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);