22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/delay.h>
29#include <linux/fs.h>
30#include <linux/timer.h>
31#include <linux/seq_file.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/compat.h>
35#include <linux/blktrace_api.h>
36#include <linux/uaccess.h>
37#include <linux/io.h>
38#include <linux/dma-mapping.h>
39#include <linux/completion.h>
40#include <linux/moduleparam.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_tcq.h>
46#include <linux/cciss_ioctl.h>
47#include <linux/string.h>
48#include <linux/bitmap.h>
49#include <asm/atomic.h>
50#include <linux/kthread.h>
51#include "hpsa_cmd.h"
52#include "hpsa.h"
53
54
55#define HPSA_DRIVER_VERSION "2.0.2-1"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57
58
59#define MAX_CONFIG_WAIT 30000
60#define MAX_IOCTL_CONFIG_WAIT 1000
61
62
63#define MAX_CMD_RETRIES 3
64
65
66MODULE_AUTHOR("Hewlett-Packard Company");
67MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION);
69MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70MODULE_VERSION(HPSA_DRIVER_VERSION);
71MODULE_LICENSE("GPL");
72
73static int hpsa_allow_any;
74module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware");
77
78
79static const struct pci_device_id hpsa_pci_device_id[] = {
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
93 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
94 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
95 {0,}
96};
97
98MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
99
100
101
102
103
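/* Board IDs (PCI subsystem device ID + subsystem vendor ID) mapped to
 * product names and controller register access methods.
 */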
104static struct board_type products[] = {
105 {0x3241103C, "Smart Array P212", &SA5_access},
106 {0x3243103C, "Smart Array P410", &SA5_access},
107 {0x3245103C, "Smart Array P410i", &SA5_access},
108 {0x3247103C, "Smart Array P411", &SA5_access},
109 {0x3249103C, "Smart Array P812", &SA5_access},
110 {0x324a103C, "Smart Array P712m", &SA5_access},
111 {0x324b103C, "Smart Array P711m", &SA5_access},
112 {0x3250103C, "Smart Array", &SA5_access},
113 {0x3250113C, "Smart Array", &SA5_access},
114 {0x3250123C, "Smart Array", &SA5_access},
115 {0x3250133C, "Smart Array", &SA5_access},
116 {0x3250143C, "Smart Array", &SA5_access},
117 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
118};
119
120static int number_of_controllers;
121
122static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
123static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
124static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
125static void start_io(struct ctlr_info *h);
126
127#ifdef CONFIG_COMPAT
128static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
129#endif
130
131static void cmd_free(struct ctlr_info *h, struct CommandList *c);
132static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
133static struct CommandList *cmd_alloc(struct ctlr_info *h);
134static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
135static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
136 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
137 int cmd_type);
138
139static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
140static void hpsa_scan_start(struct Scsi_Host *);
141static int hpsa_scan_finished(struct Scsi_Host *sh,
142 unsigned long elapsed_time);
143static int hpsa_change_queue_depth(struct scsi_device *sdev,
144 int qdepth, int reason);
145
146static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
147static int hpsa_slave_alloc(struct scsi_device *sdev);
148static void hpsa_slave_destroy(struct scsi_device *sdev);
149
150static ssize_t raid_level_show(struct device *dev,
151 struct device_attribute *attr, char *buf);
152static ssize_t lunid_show(struct device *dev,
153 struct device_attribute *attr, char *buf);
154static ssize_t unique_id_show(struct device *dev,
155 struct device_attribute *attr, char *buf);
156static ssize_t host_show_firmware_revision(struct device *dev,
157 struct device_attribute *attr, char *buf);
158static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
159static ssize_t host_store_rescan(struct device *dev,
160 struct device_attribute *attr, const char *buf, size_t count);
161static int check_for_unit_attention(struct ctlr_info *h,
162 struct CommandList *c);
163static void check_ioctl_unit_attention(struct ctlr_info *h,
164 struct CommandList *c);
165
166static void calc_bucket_map(int *bucket, int num_buckets,
167 int nsgs, int *bucket_map);
168static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
169static inline u32 next_command(struct ctlr_info *h);
170static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
171 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
172 u64 *cfg_offset);
173static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
174 unsigned long *memory_bar);
175static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
176
177static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
178static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
179static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
180static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
181static DEVICE_ATTR(firmware_revision, S_IRUGO,
182 host_show_firmware_revision, NULL);
183
184static struct device_attribute *hpsa_sdev_attrs[] = {
185 &dev_attr_raid_level,
186 &dev_attr_lunid,
187 &dev_attr_unique_id,
188 NULL,
189};
190
191static struct device_attribute *hpsa_shost_attrs[] = {
192 &dev_attr_rescan,
193 &dev_attr_firmware_revision,
194 NULL,
195};
196
197static struct scsi_host_template hpsa_driver_template = {
198 .module = THIS_MODULE,
199 .name = "hpsa",
200 .proc_name = "hpsa",
201 .queuecommand = hpsa_scsi_queue_command,
202 .scan_start = hpsa_scan_start,
203 .scan_finished = hpsa_scan_finished,
204 .change_queue_depth = hpsa_change_queue_depth,
205 .this_id = -1,
206 .use_clustering = ENABLE_CLUSTERING,
207 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
208 .ioctl = hpsa_ioctl,
209 .slave_alloc = hpsa_slave_alloc,
210 .slave_destroy = hpsa_slave_destroy,
211#ifdef CONFIG_COMPAT
212 .compat_ioctl = hpsa_compat_ioctl,
213#endif
214 .sdev_attrs = hpsa_sdev_attrs,
215 .shost_attrs = hpsa_shost_attrs,
216};
217
218static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
219{
220 unsigned long *priv = shost_priv(sdev->host);
221 return (struct ctlr_info *) *priv;
222}
223
224static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
225{
226 unsigned long *priv = shost_priv(sh);
227 return (struct ctlr_info *) *priv;
228}
229
230static int check_for_unit_attention(struct ctlr_info *h,
231 struct CommandList *c)
232{
233 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
234 return 0;
235
236 switch (c->err_info->SenseInfo[12]) {
237 case STATE_CHANGED:
238 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
239 "detected, command retried\n", h->ctlr);
240 break;
241 case LUN_FAILED:
242 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
243 "detected, action required\n", h->ctlr);
244 break;
245 case REPORT_LUNS_CHANGED:
246 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
247 "changed, action required\n", h->ctlr);
248
249
250
251 break;
252 case POWER_OR_RESET:
253 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
254 "or device reset detected\n", h->ctlr);
255 break;
256 case UNIT_ATTENTION_CLEARED:
257 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
258 "cleared by another initiator\n", h->ctlr);
259 break;
260 default:
261 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
262 "unit attention detected\n", h->ctlr);
263 break;
264 }
265 return 1;
266}
267
268static ssize_t host_store_rescan(struct device *dev,
269 struct device_attribute *attr,
270 const char *buf, size_t count)
271{
272 struct ctlr_info *h;
273 struct Scsi_Host *shost = class_to_shost(dev);
274 h = shost_to_hba(shost);
275 hpsa_scan_start(h->scsi_host);
276 return count;
277}
278
279static ssize_t host_show_firmware_revision(struct device *dev,
280 struct device_attribute *attr, char *buf)
281{
282 struct ctlr_info *h;
283 struct Scsi_Host *shost = class_to_shost(dev);
284 unsigned char *fwrev;
285
286 h = shost_to_hba(shost);
287 if (!h->hba_inquiry_data)
288 return 0;
289 fwrev = &h->hba_inquiry_data[32];
290 return snprintf(buf, 20, "%c%c%c%c\n",
291 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
292}
293
294
295static inline void addQ(struct hlist_head *list, struct CommandList *c)
296{
297 hlist_add_head(&c->list, list);
298}
299
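/* Fetch the tag of the next completed command: read it from the
 * performant-mode reply queue when that mode is active, otherwise
 * fall back to the controller's command-completed register.
 */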
300static inline u32 next_command(struct ctlr_info *h)
301{
302 u32 a;
303
304 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
305 return h->access.command_completed(h);
306
307 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
308 a = *(h->reply_pool_head);
309 (h->reply_pool_head)++;
310 h->commands_outstanding--;
311 } else {
312 a = FIFO_EMPTY;
313 }
314
315 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
316 h->reply_pool_head = h->reply_pool;
317 h->reply_pool_wraparound ^= 1;
318 }
319 return a;
320}
321
322
323
324
325
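/* In performant mode, fold the block fetch table entry for this
 * command's scatter-gather count into the low bits of its bus address
 * before handing the command to the controller.
 */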
326static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
327{
328 if (likely(h->transMethod == CFGTBL_Trans_Performant))
329 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
330}
331
332static void enqueue_cmd_and_start_io(struct ctlr_info *h,
333 struct CommandList *c)
334{
335 unsigned long flags;
336
337 set_performant_mode(h, c);
338 spin_lock_irqsave(&h->lock, flags);
339 addQ(&h->reqQ, c);
340 h->Qdepth++;
341 start_io(h);
342 spin_unlock_irqrestore(&h->lock, flags);
343}
344
345static inline void removeQ(struct CommandList *c)
346{
347 if (WARN_ON(hlist_unhashed(&c->list)))
348 return;
349 hlist_del_init(&c->list);
350}
351
352static inline int is_hba_lunid(unsigned char scsi3addr[])
353{
354 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
355}
356
357static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
358{
359 return (scsi3addr[3] & 0xC0) == 0x40;
360}
361
362static inline int is_scsi_rev_5(struct ctlr_info *h)
363{
364 if (!h->hba_inquiry_data)
365 return 0;
366 if ((h->hba_inquiry_data[2] & 0x07) == 5)
367 return 1;
368 return 0;
369}
370
371static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
372 "UNKNOWN"
373};
374#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
375
376static ssize_t raid_level_show(struct device *dev,
377 struct device_attribute *attr, char *buf)
378{
379 ssize_t l = 0;
380 unsigned char rlevel;
381 struct ctlr_info *h;
382 struct scsi_device *sdev;
383 struct hpsa_scsi_dev_t *hdev;
384 unsigned long flags;
385
386 sdev = to_scsi_device(dev);
387 h = sdev_to_hba(sdev);
388 spin_lock_irqsave(&h->lock, flags);
389 hdev = sdev->hostdata;
390 if (!hdev) {
391 spin_unlock_irqrestore(&h->lock, flags);
392 return -ENODEV;
393 }
394
395
396 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
397 spin_unlock_irqrestore(&h->lock, flags);
398 l = snprintf(buf, PAGE_SIZE, "N/A\n");
399 return l;
400 }
401
402 rlevel = hdev->raid_level;
403 spin_unlock_irqrestore(&h->lock, flags);
404 if (rlevel > RAID_UNKNOWN)
405 rlevel = RAID_UNKNOWN;
406 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
407 return l;
408}
409
410static ssize_t lunid_show(struct device *dev,
411 struct device_attribute *attr, char *buf)
412{
413 struct ctlr_info *h;
414 struct scsi_device *sdev;
415 struct hpsa_scsi_dev_t *hdev;
416 unsigned long flags;
417 unsigned char lunid[8];
418
419 sdev = to_scsi_device(dev);
420 h = sdev_to_hba(sdev);
421 spin_lock_irqsave(&h->lock, flags);
422 hdev = sdev->hostdata;
423 if (!hdev) {
424 spin_unlock_irqrestore(&h->lock, flags);
425 return -ENODEV;
426 }
427 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
428 spin_unlock_irqrestore(&h->lock, flags);
429 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
430 lunid[0], lunid[1], lunid[2], lunid[3],
431 lunid[4], lunid[5], lunid[6], lunid[7]);
432}
433
434static ssize_t unique_id_show(struct device *dev,
435 struct device_attribute *attr, char *buf)
436{
437 struct ctlr_info *h;
438 struct scsi_device *sdev;
439 struct hpsa_scsi_dev_t *hdev;
440 unsigned long flags;
441 unsigned char sn[16];
442
443 sdev = to_scsi_device(dev);
444 h = sdev_to_hba(sdev);
445 spin_lock_irqsave(&h->lock, flags);
446 hdev = sdev->hostdata;
447 if (!hdev) {
448 spin_unlock_irqrestore(&h->lock, flags);
449 return -ENODEV;
450 }
451 memcpy(sn, hdev->device_id, sizeof(sn));
452 spin_unlock_irqrestore(&h->lock, flags);
453 return snprintf(buf, 16 * 2 + 2,
454 "%02X%02X%02X%02X%02X%02X%02X%02X"
455 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
456 sn[0], sn[1], sn[2], sn[3],
457 sn[4], sn[5], sn[6], sn[7],
458 sn[8], sn[9], sn[10], sn[11],
459 sn[12], sn[13], sn[14], sn[15]);
460}
461
462static int hpsa_find_target_lun(struct ctlr_info *h,
463 unsigned char scsi3addr[], int bus, int *target, int *lun)
{
465
466
467
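	/* Find an unused target on the given bus for a new device;
	 * the LUN handed back is always 0.  Assumes h->devlock is held.
	 */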
468 int i, found = 0;
469 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
470
471 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
472
473 for (i = 0; i < h->ndevices; i++) {
474 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
475 set_bit(h->dev[i]->target, lun_taken);
476 }
477
478 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
479 if (!test_bit(i, lun_taken)) {
480
481 *target = i;
482 *lun = 0;
483 found = 1;
484 break;
485 }
486 }
487 return !found;
488}
489
490
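/* Add an entry to h->dev[], working out a bus/target/lun for it
 * if one has not already been assigned.
 */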
491static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
492 struct hpsa_scsi_dev_t *device,
493 struct hpsa_scsi_dev_t *added[], int *nadded)
494{
495
496 int n = h->ndevices;
497 int i;
498 unsigned char addr1[8], addr2[8];
499 struct hpsa_scsi_dev_t *sd;
500
501 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
502 dev_err(&h->pdev->dev, "too many devices, some will be "
503 "inaccessible.\n");
504 return -1;
505 }
506
507
508 if (device->lun != -1)
509
510 goto lun_assigned;
511
512
513
514
515
516 if (device->scsi3addr[4] == 0) {
517
518 if (hpsa_find_target_lun(h, device->scsi3addr,
519 device->bus, &device->target, &device->lun) != 0)
520 return -1;
521 goto lun_assigned;
522 }
523
524
525
526
527
528
529
530 memcpy(addr1, device->scsi3addr, 8);
531 addr1[4] = 0;
532 for (i = 0; i < n; i++) {
533 sd = h->dev[i];
534 memcpy(addr2, sd->scsi3addr, 8);
535 addr2[4] = 0;
536
537 if (memcmp(addr1, addr2, 8) == 0) {
538 device->bus = sd->bus;
539 device->target = sd->target;
540 device->lun = device->scsi3addr[4];
541 break;
542 }
543 }
544 if (device->lun == -1) {
545 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
546 " suspect firmware bug or unsupported hardware "
547 "configuration.\n");
548 return -1;
549 }
550
551lun_assigned:
552
553 h->dev[n] = device;
554 h->ndevices++;
555 added[*nadded] = device;
556 (*nadded)++;
557
558
559
560
561
562
563 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
564 scsi_device_type(device->devtype), hostno,
565 device->bus, device->target, device->lun);
566 return 0;
567}
568
569
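/* Replace an existing entry in h->dev[] with a newly detected
 * version of the same device.
 */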
570static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
571 int entry, struct hpsa_scsi_dev_t *new_entry,
572 struct hpsa_scsi_dev_t *added[], int *nadded,
573 struct hpsa_scsi_dev_t *removed[], int *nremoved)
574{
575
576 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
577 removed[*nremoved] = h->dev[entry];
578 (*nremoved)++;
579 h->dev[entry] = new_entry;
580 added[*nadded] = new_entry;
581 (*nadded)++;
582 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
583 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
584 new_entry->target, new_entry->lun);
585}
586
587
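/* Remove an entry from h->dev[], shifting the remaining entries down. */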
588static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
589 struct hpsa_scsi_dev_t *removed[], int *nremoved)
590{
591
592 int i;
593 struct hpsa_scsi_dev_t *sd;
594
595 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
596
597 sd = h->dev[entry];
598 removed[*nremoved] = h->dev[entry];
599 (*nremoved)++;
600
601 for (i = entry; i < h->ndevices-1; i++)
602 h->dev[i] = h->dev[i+1];
603 h->ndevices--;
604 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
605 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
606 sd->lun);
607}
608
609#define SCSI3ADDR_EQ(a, b) ( \
610 (a)[7] == (b)[7] && \
611 (a)[6] == (b)[6] && \
612 (a)[5] == (b)[5] && \
613 (a)[4] == (b)[4] && \
614 (a)[3] == (b)[3] && \
615 (a)[2] == (b)[2] && \
616 (a)[1] == (b)[1] && \
617 (a)[0] == (b)[0])
618
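/* Called when scsi_add_device() fails, so that h->dev[] can be put
 * back in step with what the SCSI midlayer actually knows about.
 */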
619static void fixup_botched_add(struct ctlr_info *h,
620 struct hpsa_scsi_dev_t *added)
621{
622
623
624
625 unsigned long flags;
626 int i, j;
627
628 spin_lock_irqsave(&h->lock, flags);
629 for (i = 0; i < h->ndevices; i++) {
630 if (h->dev[i] == added) {
631 for (j = i; j < h->ndevices-1; j++)
632 h->dev[j] = h->dev[j+1];
633 h->ndevices--;
634 break;
635 }
636 }
637 spin_unlock_irqrestore(&h->lock, flags);
638 kfree(added);
639}
640
641static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
642 struct hpsa_scsi_dev_t *dev2)
643{
644
645
646
647
648 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
649 sizeof(dev1->scsi3addr)) != 0)
650 return 0;
651 if (memcmp(dev1->device_id, dev2->device_id,
652 sizeof(dev1->device_id)) != 0)
653 return 0;
654 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
655 return 0;
656 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
657 return 0;
658 if (dev1->devtype != dev2->devtype)
659 return 0;
660 if (dev1->bus != dev2->bus)
661 return 0;
662 return 1;
663}
664
665
666
667
668
669
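/* Look for needle in haystack[].  Returns DEVICE_SAME, DEVICE_CHANGED
 * or DEVICE_NOT_FOUND, and fills in *index with the matching slot.
 */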
670static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
671 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
672 int *index)
673{
674 int i;
675#define DEVICE_NOT_FOUND 0
676#define DEVICE_CHANGED 1
677#define DEVICE_SAME 2
678 for (i = 0; i < haystack_size; i++) {
679 if (haystack[i] == NULL)
680 continue;
681 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
682 *index = i;
683 if (device_is_the_same(needle, haystack[i]))
684 return DEVICE_SAME;
685 else
686 return DEVICE_CHANGED;
687 }
688 }
689 *index = -1;
690 return DEVICE_NOT_FOUND;
691}
692
693static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
694 struct hpsa_scsi_dev_t *sd[], int nsds)
695{
696
697
698
699
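	/* sd[] holds the devices found by the most recent scan; reconcile
	 * h->dev[] with it and tell the SCSI midlayer about anything that
	 * was added, changed or removed.
	 */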
700 int i, entry, device_change, changes = 0;
701 struct hpsa_scsi_dev_t *csd;
702 unsigned long flags;
703 struct hpsa_scsi_dev_t **added, **removed;
704 int nadded, nremoved;
705 struct Scsi_Host *sh = NULL;
706
707 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
708 GFP_KERNEL);
709 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
710 GFP_KERNEL);
711
712 if (!added || !removed) {
713 dev_warn(&h->pdev->dev, "out of memory in "
714 "adjust_hpsa_scsi_table\n");
715 goto free_and_out;
716 }
717
718 spin_lock_irqsave(&h->devlock, flags);
719
720
721
722
723
724
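	/* First pass: remove or replace entries in h->dev[] that are no
	 * longer present, or that have changed, in the new list sd[].
	 */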
725 i = 0;
726 nremoved = 0;
727 nadded = 0;
728 while (i < h->ndevices) {
729 csd = h->dev[i];
730 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
731 if (device_change == DEVICE_NOT_FOUND) {
732 changes++;
733 hpsa_scsi_remove_entry(h, hostno, i,
734 removed, &nremoved);
735 continue;
736 } else if (device_change == DEVICE_CHANGED) {
737 changes++;
738 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
739 added, &nadded, removed, &nremoved);
740
741
742
743 sd[entry] = NULL;
744 }
745 i++;
746 }
747
748
749
750
751
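	/* Second pass: add any devices in sd[] that are not yet in h->dev[]. */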
752 for (i = 0; i < nsds; i++) {
753 if (!sd[i])
754 continue;
755 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
756 h->ndevices, &entry);
757 if (device_change == DEVICE_NOT_FOUND) {
758 changes++;
759 if (hpsa_scsi_add_entry(h, hostno, sd[i],
760 added, &nadded) != 0)
761 break;
762 sd[i] = NULL;
763 } else if (device_change == DEVICE_CHANGED) {
764
765 changes++;
766 dev_warn(&h->pdev->dev,
767 "device unexpectedly changed.\n");
768
769 }
770 }
771 spin_unlock_irqrestore(&h->devlock, flags);
772
773
774
775
776
777 if (hostno == -1 || !changes)
778 goto free_and_out;
779
780 sh = h->scsi_host;
781
782 for (i = 0; i < nremoved; i++) {
783 struct scsi_device *sdev =
784 scsi_device_lookup(sh, removed[i]->bus,
785 removed[i]->target, removed[i]->lun);
786 if (sdev != NULL) {
787 scsi_remove_device(sdev);
788 scsi_device_put(sdev);
789 } else {
790
791
792
793
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				"for removal.\n", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
797 }
798 kfree(removed[i]);
799 removed[i] = NULL;
800 }
801
802
803 for (i = 0; i < nadded; i++) {
804 if (scsi_add_device(sh, added[i]->bus,
805 added[i]->target, added[i]->lun) == 0)
806 continue;
807 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
808 "device not added.\n", hostno, added[i]->bus,
809 added[i]->target, added[i]->lun);
810
811
812
813 fixup_botched_add(h, added[i]);
814 }
815
816free_and_out:
817 kfree(added);
818 kfree(removed);
819}
820
821
822
823
824
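/* Find the h->dev[] entry matching bus/target/lun; used by
 * hpsa_slave_alloc() to hook up sdev->hostdata.
 */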
825static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
826 int bus, int target, int lun)
827{
828 int i;
829 struct hpsa_scsi_dev_t *sd;
830
831 for (i = 0; i < h->ndevices; i++) {
832 sd = h->dev[i];
833 if (sd->bus == bus && sd->target == target && sd->lun == lun)
834 return sd;
835 }
836 return NULL;
837}
838
839
840static int hpsa_slave_alloc(struct scsi_device *sdev)
841{
842 struct hpsa_scsi_dev_t *sd;
843 unsigned long flags;
844 struct ctlr_info *h;
845
846 h = sdev_to_hba(sdev);
847 spin_lock_irqsave(&h->devlock, flags);
848 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
849 sdev_id(sdev), sdev->lun);
850 if (sd != NULL)
851 sdev->hostdata = sd;
852 spin_unlock_irqrestore(&h->devlock, flags);
853 return 0;
854}
855
856static void hpsa_slave_destroy(struct scsi_device *sdev)
857{
858
859}
860
861static void hpsa_scsi_setup(struct ctlr_info *h)
862{
863 h->ndevices = 0;
864 h->scsi_host = NULL;
865 spin_lock_init(&h->devlock);
866}
867
868static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
869{
870 int i;
871
872 if (!h->cmd_sg_list)
873 return;
874 for (i = 0; i < h->nr_cmds; i++) {
875 kfree(h->cmd_sg_list[i]);
876 h->cmd_sg_list[i] = NULL;
877 }
878 kfree(h->cmd_sg_list);
879 h->cmd_sg_list = NULL;
880}
881
882static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
883{
884 int i;
885
886 if (h->chainsize <= 0)
887 return 0;
888
889 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
890 GFP_KERNEL);
891 if (!h->cmd_sg_list)
892 return -ENOMEM;
893 for (i = 0; i < h->nr_cmds; i++) {
894 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
895 h->chainsize, GFP_KERNEL);
896 if (!h->cmd_sg_list[i])
897 goto clean;
898 }
899 return 0;
900
901clean:
902 hpsa_free_sg_chain_blocks(h);
903 return -ENOMEM;
904}
905
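/* When a command needs more scatter-gather entries than fit in the
 * CommandList itself, the last embedded entry is turned into a pointer
 * to a separately DMA-mapped chain block that holds the remainder.
 */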
906static void hpsa_map_sg_chain_block(struct ctlr_info *h,
907 struct CommandList *c)
908{
909 struct SGDescriptor *chain_sg, *chain_block;
910 u64 temp64;
911
912 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
913 chain_block = h->cmd_sg_list[c->cmdindex];
914 chain_sg->Ext = HPSA_SG_CHAIN;
915 chain_sg->Len = sizeof(*chain_sg) *
916 (c->Header.SGTotal - h->max_cmd_sg_entries);
917 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
918 PCI_DMA_TODEVICE);
919 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
920 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
921}
922
923static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
924 struct CommandList *c)
925{
926 struct SGDescriptor *chain_sg;
927 union u64bit temp64;
928
929 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
930 return;
931
932 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
933 temp64.val32.lower = chain_sg->Addr.lower;
934 temp64.val32.upper = chain_sg->Addr.upper;
935 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
936}
937
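/* Command completion: translate the controller's error information
 * into a SCSI midlayer result, copy back any sense data, and free
 * the command.
 */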
938static void complete_scsi_command(struct CommandList *cp,
939 int timeout, u32 tag)
940{
941 struct scsi_cmnd *cmd;
942 struct ctlr_info *h;
943 struct ErrorInfo *ei;
944
945 unsigned char sense_key;
946 unsigned char asc;
947 unsigned char ascq;
948
949 ei = cp->err_info;
950 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
951 h = cp->h;
952
953 scsi_dma_unmap(cmd);
954 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
955 hpsa_unmap_sg_chain_block(h, cp);
956
957 cmd->result = (DID_OK << 16);
958 cmd->result |= (COMMAND_COMPLETE << 8);
959 cmd->result |= ei->ScsiStatus;
960
961
962 memcpy(cmd->sense_buffer, ei->SenseInfo,
963 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
964 SCSI_SENSE_BUFFERSIZE :
965 ei->SenseLen);
966 scsi_set_resid(cmd, ei->ResidualCnt);
967
968 if (ei->CommandStatus == 0) {
969 cmd->scsi_done(cmd);
970 cmd_free(h, cp);
971 return;
972 }
973
974
975 switch (ei->CommandStatus) {
976
977 case CMD_TARGET_STATUS:
978 if (ei->ScsiStatus) {
979
980 sense_key = 0xf & ei->SenseInfo[2];
981
982 asc = ei->SenseInfo[12];
983
984 ascq = ei->SenseInfo[13];
985 }
986
987 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
988 if (check_for_unit_attention(h, cp)) {
989 cmd->result = DID_SOFT_ERROR << 16;
990 break;
991 }
992 if (sense_key == ILLEGAL_REQUEST) {
993
994
995
996
997 if (cp->Request.CDB[0] == REPORT_LUNS)
998 break;
999
1000
1001
1002
1003 if ((asc == 0x25) && (ascq == 0x0)) {
1004 dev_warn(&h->pdev->dev, "cp %p "
1005 "has check condition\n", cp);
1006 break;
1007 }
1008 }
1009
1010 if (sense_key == NOT_READY) {
1011
1012
1013
1014
1015 if ((asc == 0x04) && (ascq == 0x03)) {
1016 dev_warn(&h->pdev->dev, "cp %p "
1017 "has check condition: unit "
1018 "not ready, manual "
1019 "intervention required\n", cp);
1020 break;
1021 }
1022 }
1023 if (sense_key == ABORTED_COMMAND) {
1024
1025 dev_warn(&h->pdev->dev, "cp %p "
1026 "has check condition: aborted command: "
1027 "ASC: 0x%x, ASCQ: 0x%x\n",
1028 cp, asc, ascq);
1029 cmd->result = DID_SOFT_ERROR << 16;
1030 break;
1031 }
1032
1033 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1034 "unknown type: "
1035 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1036 "Returning result: 0x%x, "
1037 "cmd=[%02x %02x %02x %02x %02x "
1038 "%02x %02x %02x %02x %02x %02x "
1039 "%02x %02x %02x %02x %02x]\n",
1040 cp, sense_key, asc, ascq,
1041 cmd->result,
1042 cmd->cmnd[0], cmd->cmnd[1],
1043 cmd->cmnd[2], cmd->cmnd[3],
1044 cmd->cmnd[4], cmd->cmnd[5],
1045 cmd->cmnd[6], cmd->cmnd[7],
1046 cmd->cmnd[8], cmd->cmnd[9],
1047 cmd->cmnd[10], cmd->cmnd[11],
1048 cmd->cmnd[12], cmd->cmnd[13],
1049 cmd->cmnd[14], cmd->cmnd[15]);
1050 break;
1051 }
1052
1053
1054
1055
1056
1057 if (ei->ScsiStatus) {
1058 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1059 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1060 "Returning result: 0x%x\n",
1061 cp, ei->ScsiStatus,
1062 sense_key, asc, ascq,
1063 cmd->result);
1064 } else {
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080 cmd->result = DID_NO_CONNECT << 16;
1081 }
1082 break;
1083
1084 case CMD_DATA_UNDERRUN:
1085 break;
1086 case CMD_DATA_OVERRUN:
1087 dev_warn(&h->pdev->dev, "cp %p has"
1088 " completed with data overrun "
1089 "reported\n", cp);
1090 break;
1091 case CMD_INVALID: {
1092
1093
1094
1095
1096
1097
1098
1099
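		/* CMD_INVALID usually means the addressed device is no
		 * longer present, so report it as a missing device rather
		 * than as an error.
		 */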
1100 cmd->result = DID_NO_CONNECT << 16;
1101 }
1102 break;
1103 case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
1106 break;
1107 case CMD_HARDWARE_ERR:
1108 cmd->result = DID_ERROR << 16;
1109 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1110 break;
1111 case CMD_CONNECTION_LOST:
1112 cmd->result = DID_ERROR << 16;
1113 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1114 break;
1115 case CMD_ABORTED:
1116 cmd->result = DID_ABORT << 16;
1117 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1118 cp, ei->ScsiStatus);
1119 break;
1120 case CMD_ABORT_FAILED:
1121 cmd->result = DID_ERROR << 16;
1122 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1123 break;
1124 case CMD_UNSOLICITED_ABORT:
1125 cmd->result = DID_RESET << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
1128 break;
1129 case CMD_TIMEOUT:
1130 cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1132 break;
1133 default:
1134 cmd->result = DID_ERROR << 16;
1135 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1136 cp, ei->CommandStatus);
1137 }
1138 cmd->scsi_done(cmd);
1139 cmd_free(h, cp);
1140}
1141
1142static int hpsa_scsi_detect(struct ctlr_info *h)
1143{
1144 struct Scsi_Host *sh;
1145 int error;
1146
1147 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1148 if (sh == NULL)
1149 goto fail;
1150
1151 sh->io_port = 0;
1152 sh->n_io_port = 0;
1153 sh->this_id = -1;
1154 sh->max_channel = 3;
1155 sh->max_cmd_len = MAX_COMMAND_SIZE;
1156 sh->max_lun = HPSA_MAX_LUN;
1157 sh->max_id = HPSA_MAX_LUN;
1158 sh->can_queue = h->nr_cmds;
1159 sh->cmd_per_lun = h->nr_cmds;
1160 sh->sg_tablesize = h->maxsgentries;
1161 h->scsi_host = sh;
1162 sh->hostdata[0] = (unsigned long) h;
1163 sh->irq = h->intr[PERF_MODE_INT];
1164 sh->unique_id = sh->irq;
1165 error = scsi_add_host(sh, &h->pdev->dev);
1166 if (error)
1167 goto fail_host_put;
1168 scsi_scan_host(sh);
1169 return 0;
1170
1171 fail_host_put:
1172 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1173 " failed for controller %d\n", h->ctlr);
1174 scsi_host_put(sh);
1175 return error;
1176 fail:
1177 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1178 " failed for controller %d\n", h->ctlr);
1179 return -ENOMEM;
1180}
1181
1182static void hpsa_pci_unmap(struct pci_dev *pdev,
1183 struct CommandList *c, int sg_used, int data_direction)
1184{
1185 int i;
1186 union u64bit addr64;
1187
1188 for (i = 0; i < sg_used; i++) {
1189 addr64.val32.lower = c->SG[i].Addr.lower;
1190 addr64.val32.upper = c->SG[i].Addr.upper;
1191 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1192 data_direction);
1193 }
1194}
1195
1196static void hpsa_map_one(struct pci_dev *pdev,
1197 struct CommandList *cp,
1198 unsigned char *buf,
1199 size_t buflen,
1200 int data_direction)
1201{
1202 u64 addr64;
1203
1204 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1205 cp->Header.SGList = 0;
1206 cp->Header.SGTotal = 0;
1207 return;
1208 }
1209
1210 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1211 cp->SG[0].Addr.lower =
1212 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1213 cp->SG[0].Addr.upper =
1214 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1215 cp->SG[0].Len = buflen;
1216 cp->Header.SGList = (u8) 1;
1217 cp->Header.SGTotal = (u16) 1;
1218}
1219
1220static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1221 struct CommandList *c)
1222{
1223 DECLARE_COMPLETION_ONSTACK(wait);
1224
1225 c->waiting = &wait;
1226 enqueue_cmd_and_start_io(h, c);
1227 wait_for_completion(&wait);
1228}
1229
1230static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1231 struct CommandList *c, int data_direction)
1232{
1233 int retry_count = 0;
1234
1235 do {
1236 memset(c->err_info, 0, sizeof(c->err_info));
1237 hpsa_scsi_do_simple_cmd_core(h, c);
1238 retry_count++;
1239 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1240 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1241}
1242
1243static void hpsa_scsi_interpret_error(struct CommandList *cp)
1244{
1245 struct ErrorInfo *ei;
1246 struct device *d = &cp->h->pdev->dev;
1247
1248 ei = cp->err_info;
1249 switch (ei->CommandStatus) {
1250 case CMD_TARGET_STATUS:
1251 dev_warn(d, "cmd %p has completed with errors\n", cp);
1252 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1253 ei->ScsiStatus);
1254 if (ei->ScsiStatus == 0)
1255 dev_warn(d, "SCSI status is abnormally zero. "
1256 "(probably indicates selection timeout "
1257 "reported incorrectly due to a known "
1258 "firmware bug, circa July, 2001.)\n");
1259 break;
1260 case CMD_DATA_UNDERRUN:
1261 dev_info(d, "UNDERRUN\n");
1262 break;
1263 case CMD_DATA_OVERRUN:
1264 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1265 break;
1266 case CMD_INVALID: {
1267
1268
1269
1270 dev_warn(d, "cp %p is reported invalid (probably means "
1271 "target device no longer present)\n", cp);
1272
1273
1274 }
1275 break;
1276 case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
1278 break;
1279 case CMD_HARDWARE_ERR:
1280
1281 dev_warn(d, "cp %p had hardware error\n", cp);
1282 break;
1283 case CMD_CONNECTION_LOST:
1284 dev_warn(d, "cp %p had connection lost\n", cp);
1285 break;
1286 case CMD_ABORTED:
1287 dev_warn(d, "cp %p was aborted\n", cp);
1288 break;
1289 case CMD_ABORT_FAILED:
1290 dev_warn(d, "cp %p reports abort failed\n", cp);
1291 break;
1292 case CMD_UNSOLICITED_ABORT:
1293 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1294 break;
1295 case CMD_TIMEOUT:
1296 dev_warn(d, "cp %p timed out\n", cp);
1297 break;
1298 default:
1299 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1300 ei->CommandStatus);
1301 }
1302}
1303
1304static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1305 unsigned char page, unsigned char *buf,
1306 unsigned char bufsize)
1307{
1308 int rc = IO_OK;
1309 struct CommandList *c;
1310 struct ErrorInfo *ei;
1311
1312 c = cmd_special_alloc(h);
1313
1314 if (c == NULL) {
1315 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1316 return -ENOMEM;
1317 }
1318
1319 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1320 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1321 ei = c->err_info;
1322 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1323 hpsa_scsi_interpret_error(c);
1324 rc = -1;
1325 }
1326 cmd_special_free(h, c);
1327 return rc;
1328}
1329
1330static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1331{
1332 int rc = IO_OK;
1333 struct CommandList *c;
1334 struct ErrorInfo *ei;
1335
1336 c = cmd_special_alloc(h);
1337
1338 if (c == NULL) {
1339 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1340 return -ENOMEM;
1341 }
1342
1343 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1344 hpsa_scsi_do_simple_cmd_core(h, c);
1345
1346
1347 ei = c->err_info;
1348 if (ei->CommandStatus != 0) {
1349 hpsa_scsi_interpret_error(c);
1350 rc = -1;
1351 }
1352 cmd_special_free(h, c);
1353 return rc;
1354}
1355
1356static void hpsa_get_raid_level(struct ctlr_info *h,
1357 unsigned char *scsi3addr, unsigned char *raid_level)
1358{
1359 int rc;
1360 unsigned char *buf;
1361
1362 *raid_level = RAID_UNKNOWN;
1363 buf = kzalloc(64, GFP_KERNEL);
1364 if (!buf)
1365 return;
1366 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1367 if (rc == 0)
1368 *raid_level = buf[8];
1369 if (*raid_level > RAID_UNKNOWN)
1370 *raid_level = RAID_UNKNOWN;
1371 kfree(buf);
1372 return;
1373}
1374
1375
1376static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1377 unsigned char *device_id, int buflen)
1378{
1379 int rc;
1380 unsigned char *buf;
1381
1382 if (buflen > 16)
1383 buflen = 16;
1384 buf = kzalloc(64, GFP_KERNEL);
1385 if (!buf)
1386 return -1;
1387 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1388 if (rc == 0)
1389 memcpy(device_id, &buf[8], buflen);
1390 kfree(buf);
1391 return rc != 0;
1392}
1393
1394static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1395 struct ReportLUNdata *buf, int bufsize,
1396 int extended_response)
1397{
1398 int rc = IO_OK;
1399 struct CommandList *c;
1400 unsigned char scsi3addr[8];
1401 struct ErrorInfo *ei;
1402
1403 c = cmd_special_alloc(h);
1404 if (c == NULL) {
1405 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1406 return -1;
1407 }
1408
1409 memset(scsi3addr, 0, sizeof(scsi3addr));
1410 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1411 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1412 if (extended_response)
1413 c->Request.CDB[1] = extended_response;
1414 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1415 ei = c->err_info;
1416 if (ei->CommandStatus != 0 &&
1417 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1418 hpsa_scsi_interpret_error(c);
1419 rc = -1;
1420 }
1421 cmd_special_free(h, c);
1422 return rc;
1423}
1424
1425static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1426 struct ReportLUNdata *buf,
1427 int bufsize, int extended_response)
1428{
1429 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1430}
1431
1432static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1433 struct ReportLUNdata *buf, int bufsize)
1434{
1435 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1436}
1437
1438static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1439 int bus, int target, int lun)
1440{
1441 device->bus = bus;
1442 device->target = target;
1443 device->lun = lun;
1444}
1445
1446static int hpsa_update_device_info(struct ctlr_info *h,
1447 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1448{
1449#define OBDR_TAPE_INQ_SIZE 49
1450 unsigned char *inq_buff;
1451
1452 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1453 if (!inq_buff)
1454 goto bail_out;
1455
1456
1457 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1458 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1459
1460 dev_err(&h->pdev->dev,
1461 "hpsa_update_device_info: inquiry failed\n");
1462 goto bail_out;
1463 }
1464
1465 this_device->devtype = (inq_buff[0] & 0x1f);
1466 memcpy(this_device->scsi3addr, scsi3addr, 8);
1467 memcpy(this_device->vendor, &inq_buff[8],
1468 sizeof(this_device->vendor));
1469 memcpy(this_device->model, &inq_buff[16],
1470 sizeof(this_device->model));
1471 memset(this_device->device_id, 0,
1472 sizeof(this_device->device_id));
1473 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1474 sizeof(this_device->device_id));
1475
1476 if (this_device->devtype == TYPE_DISK &&
1477 is_logical_dev_addr_mode(scsi3addr))
1478 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1479 else
1480 this_device->raid_level = RAID_UNKNOWN;
1481
1482 kfree(inq_buff);
1483 return 0;
1484
1485bail_out:
1486 kfree(inq_buff);
1487 return 1;
1488}
1489
1490static unsigned char *msa2xxx_model[] = {
1491 "MSA2012",
1492 "MSA2024",
1493 "MSA2312",
1494 "MSA2324",
1495 NULL,
1496};
1497
1498static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1499{
1500 int i;
1501
1502 for (i = 0; msa2xxx_model[i]; i++)
1503 if (strncmp(device->model, msa2xxx_model[i],
1504 strlen(msa2xxx_model[i])) == 0)
1505 return 1;
1506 return 0;
1507}
1508
1509
1510
1511
1512
1513
1514
1515
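/* Work out the bus/target/lun under which a reported LUN is presented.
 * On most controllers: logical volumes on bus 0 (bus 1 when behind an
 * MSA2xxx), physical devices on bus 2, and the RAID controller itself
 * on bus 3; SCSI revision 5 controllers put everything on bus 0.
 */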
1516static void figure_bus_target_lun(struct ctlr_info *h,
1517 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1518 struct hpsa_scsi_dev_t *device)
1519{
1520 u32 lunid;
1521
1522 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1523
1524 if (unlikely(is_scsi_rev_5(h))) {
1525
1526
1527
1528 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1529 *bus = 0;
1530 *target = 0;
1531 *lun = (lunid & 0x3fff) + 1;
1532 } else {
1533
1534 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1535 if (is_msa2xxx(h, device)) {
1536
1537
1538
1539
1540 *bus = 1;
1541 *target = (lunid >> 16) & 0x3fff;
1542 *lun = lunid & 0x00ff;
1543 } else {
1544
1545 *bus = 0;
1546 *lun = 0;
1547 *target = lunid & 0x3fff;
1548 }
1549 }
1550 } else {
1551
1552 if (is_hba_lunid(lunaddrbytes))
1553 if (unlikely(is_scsi_rev_5(h))) {
1554 *bus = 0;
1555 *target = 0;
1556 *lun = 0;
1557 return;
1558 } else
1559 *bus = 3;
1560 else
1561 *bus = 2;
1562 *target = -1;
1563 *lun = -1;
1564 }
1565}
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
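/* The MSA2xxx enclosure device at LUN 0 is not included in the
 * REPORT LUNS data, and without a LUN 0 the SCSI layer will not scan
 * the target, so manufacture an entry for the enclosure here.
 */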
1578static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1579 struct hpsa_scsi_dev_t *tmpdevice,
1580 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1581 int bus, int target, int lun, unsigned long lunzerobits[],
1582 int *nmsa2xxx_enclosures)
1583{
1584 unsigned char scsi3addr[8];
1585
1586 if (test_bit(target, lunzerobits))
1587 return 0;
1588
1589 if (!is_logical_dev_addr_mode(lunaddrbytes))
1590 return 0;
1591
1592 if (!is_msa2xxx(h, tmpdevice))
1593 return 0;
1594
1595 if (lun == 0)
1596 return 0;
1597
1598 if (is_hba_lunid(scsi3addr))
1599 return 0;
1600
1601 if (is_scsi_rev_5(h))
1602 return 0;
1603
1604#define MAX_MSA2XXX_ENCLOSURES 32
1605 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded. Check your hardware "
			"configuration.\n");
1609 return 0;
1610 }
1611
1612 memset(scsi3addr, 0, 8);
1613 scsi3addr[3] = target;
1614 if (hpsa_update_device_info(h, scsi3addr, this_device))
1615 return 0;
1616 (*nmsa2xxx_enclosures)++;
1617 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1618 set_bit(target, lunzerobits);
1619 return 1;
1620}
1621
1622
1623
1624
1625
1626
1627
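/* Issue REPORT PHYSICAL and REPORT LOGICAL LUNS and clip the returned
 * counts to what the driver can support.
 */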
1628static int hpsa_gather_lun_info(struct ctlr_info *h,
1629 int reportlunsize,
1630 struct ReportLUNdata *physdev, u32 *nphysicals,
1631 struct ReportLUNdata *logdev, u32 *nlogicals)
1632{
1633 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1634 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1635 return -1;
1636 }
1637 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1638 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1639 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1640 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1641 *nphysicals - HPSA_MAX_PHYS_LUN);
1642 *nphysicals = HPSA_MAX_PHYS_LUN;
1643 }
1644 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1645 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1646 return -1;
1647 }
1648 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1649
1650 if (*nlogicals > HPSA_MAX_LUN) {
1651 dev_warn(&h->pdev->dev,
1652 "maximum logical LUNs (%d) exceeded. "
1653 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1654 *nlogicals - HPSA_MAX_LUN);
1655 *nlogicals = HPSA_MAX_LUN;
1656 }
1657 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1658 dev_warn(&h->pdev->dev,
1659 "maximum logical + physical LUNs (%d) exceeded. "
1660 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1661 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1662 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1663 }
1664 return 0;
1665}
1666
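/* Given index i into the combined (controller + physical + logical)
 * device list, return a pointer to the corresponding 8-byte LUN address.
 */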
1667u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1668 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1669 struct ReportLUNdata *logdev_list)
1670{
1671
1672
1673
1674
1675
1676 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1677 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1678
1679 if (i == raid_ctlr_position)
1680 return RAID_CTLR_LUNID;
1681
1682 if (i < logicals_start)
1683 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1684
1685 if (i < last_device)
1686 return &logdev_list->LUN[i - nphysicals -
1687 (raid_ctlr_position == 0)][0];
1688 BUG();
1689 return NULL;
1690}
1691
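/* Rebuild the driver's view of attached devices by asking the
 * controller what it sees, then reconcile that with the SCSI midlayer
 * via adjust_hpsa_scsi_table().
 */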
1692static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1693{
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704 struct ReportLUNdata *physdev_list = NULL;
1705 struct ReportLUNdata *logdev_list = NULL;
1706 unsigned char *inq_buff = NULL;
1707 u32 nphysicals = 0;
1708 u32 nlogicals = 0;
1709 u32 ndev_allocated = 0;
1710 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1711 int ncurrent = 0;
1712 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1713 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1714 int bus, target, lun;
1715 int raid_ctlr_position;
1716 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1717
1718 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1719 GFP_KERNEL);
1720 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1721 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1722 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1723 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1724
1725 if (!currentsd || !physdev_list || !logdev_list ||
1726 !inq_buff || !tmpdevice) {
1727 dev_err(&h->pdev->dev, "out of memory\n");
1728 goto out;
1729 }
1730 memset(lunzerobits, 0, sizeof(lunzerobits));
1731
1732 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1733 logdev_list, &nlogicals))
1734 goto out;
1735
1736
1737
1738
1739
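	/* Allocate one device structure per reported LUN, plus room for
	 * possible MSA2xxx enclosures and one for the RAID controller itself.
	 */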
1740 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1741
1742
1743 for (i = 0; i < ndevs_to_allocate; i++) {
1744 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1745 if (!currentsd[i]) {
1746 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1747 __FILE__, __LINE__);
1748 goto out;
1749 }
1750 ndev_allocated++;
1751 }
1752
1753 if (unlikely(is_scsi_rev_5(h)))
1754 raid_ctlr_position = 0;
1755 else
1756 raid_ctlr_position = nphysicals + nlogicals;
1757
1758
1759 nmsa2xxx_enclosures = 0;
1760 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1761 u8 *lunaddrbytes;
1762
1763
1764 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1765 i, nphysicals, nlogicals, physdev_list, logdev_list);
1766
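		/* Skip masked-off physical devices. */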
1767 if (lunaddrbytes[3] & 0xC0 &&
1768 i < nphysicals + (raid_ctlr_position == 0))
1769 continue;
1770
1771
1772 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1773 continue;
1774 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1775 tmpdevice);
1776 this_device = currentsd[ncurrent];
1777
1778
1779
1780
1781
1782
1783
1784
1785 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1786 lunaddrbytes, bus, target, lun, lunzerobits,
1787 &nmsa2xxx_enclosures)) {
1788 ncurrent++;
1789 this_device = currentsd[ncurrent];
1790 }
1791
1792 *this_device = *tmpdevice;
1793 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1794
1795 switch (this_device->devtype) {
1796 case TYPE_ROM: {
1797
1798
1799
1800
1801
1802
1803
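			/* CD-ROMs are not really supported; the only one
			 * accepted is the "One Button Disaster Recovery"
			 * tape drive, which temporarily reports itself as a
			 * CD-ROM and is recognized by the "$DR-10" signature
			 * in its inquiry data.
			 */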
1804 char obdr_sig[7];
1805#define OBDR_TAPE_SIG "$DR-10"
1806 strncpy(obdr_sig, &inq_buff[43], 6);
1807 obdr_sig[6] = '\0';
1808 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1809
1810 break;
1811 }
1812 ncurrent++;
1813 break;
1814 case TYPE_DISK:
1815 if (i < nphysicals)
1816 break;
1817 ncurrent++;
1818 break;
1819 case TYPE_TAPE:
1820 case TYPE_MEDIUM_CHANGER:
1821 ncurrent++;
1822 break;
1823 case TYPE_RAID:
1824
1825
1826
1827
1828
1829 if (!is_hba_lunid(lunaddrbytes))
1830 break;
1831 ncurrent++;
1832 break;
1833 default:
1834 break;
1835 }
1836 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1837 break;
1838 }
1839 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1840out:
1841 kfree(tmpdevice);
1842 for (i = 0; i < ndev_allocated; i++)
1843 kfree(currentsd[i]);
1844 kfree(currentsd);
1845 kfree(inq_buff);
1846 kfree(physdev_list);
1847 kfree(logdev_list);
1848}
1849
1850
1851
1852
1853
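/* Map the scsi_cmnd's scatter-gather list into the command's SG
 * descriptors, spilling into a chained SG block when it does not fit.
 */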
1854static int hpsa_scatter_gather(struct ctlr_info *h,
1855 struct CommandList *cp,
1856 struct scsi_cmnd *cmd)
1857{
1858 unsigned int len;
1859 struct scatterlist *sg;
1860 u64 addr64;
1861 int use_sg, i, sg_index, chained;
1862 struct SGDescriptor *curr_sg;
1863
1864 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1865
1866 use_sg = scsi_dma_map(cmd);
1867 if (use_sg < 0)
1868 return use_sg;
1869
1870 if (!use_sg)
1871 goto sglist_finished;
1872
1873 curr_sg = cp->SG;
1874 chained = 0;
1875 sg_index = 0;
1876 scsi_for_each_sg(cmd, sg, use_sg, i) {
1877 if (i == h->max_cmd_sg_entries - 1 &&
1878 use_sg > h->max_cmd_sg_entries) {
1879 chained = 1;
1880 curr_sg = h->cmd_sg_list[cp->cmdindex];
1881 sg_index = 0;
1882 }
1883 addr64 = (u64) sg_dma_address(sg);
1884 len = sg_dma_len(sg);
1885 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1886 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1887 curr_sg->Len = len;
1888 curr_sg->Ext = 0;
1889 curr_sg++;
1890 }
1891
1892 if (use_sg + chained > h->maxSG)
1893 h->maxSG = use_sg + chained;
1894
1895 if (chained) {
1896 cp->Header.SGList = h->max_cmd_sg_entries;
1897 cp->Header.SGTotal = (u16) (use_sg + 1);
1898 hpsa_map_sg_chain_block(h, cp);
1899 return 0;
1900 }
1901
1902sglist_finished:
1903
1904 cp->Header.SGList = (u8) use_sg;
1905 cp->Header.SGTotal = (u16) use_sg;
1906 return 0;
1907}
1908
1909
1910static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
1911 void (*done)(struct scsi_cmnd *))
1912{
1913 struct ctlr_info *h;
1914 struct hpsa_scsi_dev_t *dev;
1915 unsigned char scsi3addr[8];
1916 struct CommandList *c;
1917 unsigned long flags;
1918
1919
1920 h = sdev_to_hba(cmd->device);
1921 dev = cmd->device->hostdata;
1922 if (!dev) {
1923 cmd->result = DID_NO_CONNECT << 16;
1924 done(cmd);
1925 return 0;
1926 }
1927 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1928
1929
1930 spin_lock_irqsave(&h->lock, flags);
1931 c = cmd_alloc(h);
1932 spin_unlock_irqrestore(&h->lock, flags);
1933 if (c == NULL) {
1934 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1935 return SCSI_MLQUEUE_HOST_BUSY;
1936 }
1937
1938
1939
1940 cmd->scsi_done = done;
1941
1942
1943 cmd->host_scribble = (unsigned char *) c;
1944
1945 c->cmd_type = CMD_SCSI;
1946 c->scsi_cmd = cmd;
1947 c->Header.ReplyQueue = 0;
1948 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1949 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1950 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1951
1952
1953
1954 c->Request.Timeout = 0;
1955 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1956 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1957 c->Request.CDBLen = cmd->cmd_len;
1958 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1959 c->Request.Type.Type = TYPE_CMD;
1960 c->Request.Type.Attribute = ATTR_SIMPLE;
1961 switch (cmd->sc_data_direction) {
1962 case DMA_TO_DEVICE:
1963 c->Request.Type.Direction = XFER_WRITE;
1964 break;
1965 case DMA_FROM_DEVICE:
1966 c->Request.Type.Direction = XFER_READ;
1967 break;
1968 case DMA_NONE:
1969 c->Request.Type.Direction = XFER_NONE;
1970 break;
1971 case DMA_BIDIRECTIONAL:
1972
1973
1974
1975
1976
1977 c->Request.Type.Direction = XFER_RSVD;
1978
1979
1980
1981
1982
1983
1984
1985
1986 break;
1987
1988 default:
1989 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
1990 cmd->sc_data_direction);
1991 BUG();
1992 break;
1993 }
1994
1995 if (hpsa_scatter_gather(h, c, cmd) < 0) {
1996 cmd_free(h, c);
1997 return SCSI_MLQUEUE_HOST_BUSY;
1998 }
1999 enqueue_cmd_and_start_io(h, c);
2000
2001 return 0;
2002}
2003
2004static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2005
2006static void hpsa_scan_start(struct Scsi_Host *sh)
2007{
2008 struct ctlr_info *h = shost_to_hba(sh);
2009 unsigned long flags;
2010
2011
2012 while (1) {
2013 spin_lock_irqsave(&h->scan_lock, flags);
2014 if (h->scan_finished)
2015 break;
2016 spin_unlock_irqrestore(&h->scan_lock, flags);
2017 wait_event(h->scan_wait_queue, h->scan_finished);
2018
2019
2020
2021
2022
2023 }
2024 h->scan_finished = 0;
2025 spin_unlock_irqrestore(&h->scan_lock, flags);
2026
2027 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2028
2029 spin_lock_irqsave(&h->scan_lock, flags);
2030 h->scan_finished = 1;
2031 wake_up_all(&h->scan_wait_queue);
2032 spin_unlock_irqrestore(&h->scan_lock, flags);
2033}
2034
2035static int hpsa_scan_finished(struct Scsi_Host *sh,
2036 unsigned long elapsed_time)
2037{
2038 struct ctlr_info *h = shost_to_hba(sh);
2039 unsigned long flags;
2040 int finished;
2041
2042 spin_lock_irqsave(&h->scan_lock, flags);
2043 finished = h->scan_finished;
2044 spin_unlock_irqrestore(&h->scan_lock, flags);
2045 return finished;
2046}
2047
2048static int hpsa_change_queue_depth(struct scsi_device *sdev,
2049 int qdepth, int reason)
2050{
2051 struct ctlr_info *h = sdev_to_hba(sdev);
2052
2053 if (reason != SCSI_QDEPTH_DEFAULT)
2054 return -ENOTSUPP;
2055
2056 if (qdepth < 1)
2057 qdepth = 1;
2058 else
2059 if (qdepth > h->nr_cmds)
2060 qdepth = h->nr_cmds;
2061 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2062 return sdev->queue_depth;
2063}
2064
2065static void hpsa_unregister_scsi(struct ctlr_info *h)
2066{
2067
2068 scsi_remove_host(h->scsi_host);
2069 scsi_host_put(h->scsi_host);
2070 h->scsi_host = NULL;
2071}
2072
2073static int hpsa_register_scsi(struct ctlr_info *h)
2074{
2075 int rc;
2076
2077 rc = hpsa_scsi_detect(h);
2078 if (rc != 0)
2079 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2080 " hpsa_scsi_detect(), rc is %d\n", rc);
2081 return rc;
2082}
2083
2084static int wait_for_device_to_become_ready(struct ctlr_info *h,
2085 unsigned char lunaddr[])
2086{
2087 int rc = 0;
2088 int count = 0;
2089 int waittime = 1;
2090 struct CommandList *c;
2091
2092 c = cmd_special_alloc(h);
2093 if (!c) {
2094 dev_warn(&h->pdev->dev, "out of memory in "
2095 "wait_for_device_to_become_ready.\n");
2096 return IO_ERROR;
2097 }
2098
2099
2100 while (count < HPSA_TUR_RETRY_LIMIT) {
2101
2102
2103
2104
2105 msleep(1000 * waittime);
2106 count++;
2107
2108
2109 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2110 waittime = waittime * 2;
2111
2112
2113 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2114 hpsa_scsi_do_simple_cmd_core(h, c);
2115
2116
2117 if (c->err_info->CommandStatus == CMD_SUCCESS)
2118 break;
2119
2120 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2121 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2122 (c->err_info->SenseInfo[2] == NO_SENSE ||
2123 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2124 break;
2125
2126 dev_warn(&h->pdev->dev, "waiting %d secs "
2127 "for device to become ready.\n", waittime);
2128 rc = 1;
2129 }
2130
2131 if (rc)
2132 dev_warn(&h->pdev->dev, "giving up on device.\n");
2133 else
2134 dev_warn(&h->pdev->dev, "device is ready.\n");
2135
2136 cmd_special_free(h, c);
2137 return rc;
2138}
2139
2140
2141
2142
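/* SCSI error-handler entry point: reset the device underlying the
 * failed command and wait for it to become ready again.
 */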
2143static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2144{
2145 int rc;
2146 struct ctlr_info *h;
2147 struct hpsa_scsi_dev_t *dev;
2148
2149
2150 h = sdev_to_hba(scsicmd->device);
2151 if (h == NULL)
2152 return FAILED;
2153 dev = scsicmd->device->hostdata;
2154 if (!dev) {
2155 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2156 "device lookup failed.\n");
2157 return FAILED;
2158 }
2159 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2160 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2161
2162 rc = hpsa_send_reset(h, dev->scsi3addr);
2163 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2164 return SUCCESS;
2165
2166 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2167 return FAILED;
2168}
2169
2170
2171
2172
2173
2174
2175
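/* Get a free command from the pool carved out at init time, using the
 * cmd_pool_bits bitmap to track which slots are in use; never sleeps.
 */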
2176static struct CommandList *cmd_alloc(struct ctlr_info *h)
2177{
2178 struct CommandList *c;
2179 int i;
2180 union u64bit temp64;
2181 dma_addr_t cmd_dma_handle, err_dma_handle;
2182
2183 do {
2184 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2185 if (i == h->nr_cmds)
2186 return NULL;
2187 } while (test_and_set_bit
2188 (i & (BITS_PER_LONG - 1),
2189 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2190 c = h->cmd_pool + i;
2191 memset(c, 0, sizeof(*c));
2192 cmd_dma_handle = h->cmd_pool_dhandle
2193 + i * sizeof(*c);
2194 c->err_info = h->errinfo_pool + i;
2195 memset(c->err_info, 0, sizeof(*c->err_info));
2196 err_dma_handle = h->errinfo_pool_dhandle
2197 + i * sizeof(*c->err_info);
2198 h->nr_allocs++;
2199
2200 c->cmdindex = i;
2201
2202 INIT_HLIST_NODE(&c->list);
2203 c->busaddr = (u32) cmd_dma_handle;
2204 temp64.val = (u64) err_dma_handle;
2205 c->ErrDesc.Addr.lower = temp64.val32.lower;
2206 c->ErrDesc.Addr.upper = temp64.val32.upper;
2207 c->ErrDesc.Len = sizeof(*c->err_info);
2208
2209 c->h = h;
2210 return c;
2211}
2212
2213
2214
2215
2216
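/* Allocate a command and its error buffer directly with
 * pci_alloc_consistent(), independent of the preallocated pool;
 * freed with cmd_special_free().
 */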
2217static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2218{
2219 struct CommandList *c;
2220 union u64bit temp64;
2221 dma_addr_t cmd_dma_handle, err_dma_handle;
2222
2223 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2224 if (c == NULL)
2225 return NULL;
2226 memset(c, 0, sizeof(*c));
2227
2228 c->cmdindex = -1;
2229
2230 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2231 &err_dma_handle);
2232
2233 if (c->err_info == NULL) {
2234 pci_free_consistent(h->pdev,
2235 sizeof(*c), c, cmd_dma_handle);
2236 return NULL;
2237 }
2238 memset(c->err_info, 0, sizeof(*c->err_info));
2239
2240 INIT_HLIST_NODE(&c->list);
2241 c->busaddr = (u32) cmd_dma_handle;
2242 temp64.val = (u64) err_dma_handle;
2243 c->ErrDesc.Addr.lower = temp64.val32.lower;
2244 c->ErrDesc.Addr.upper = temp64.val32.upper;
2245 c->ErrDesc.Len = sizeof(*c->err_info);
2246
2247 c->h = h;
2248 return c;
2249}
2250
2251static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2252{
2253 int i;
2254
2255 i = c - h->cmd_pool;
2256 clear_bit(i & (BITS_PER_LONG - 1),
2257 h->cmd_pool_bits + (i / BITS_PER_LONG));
2258 h->nr_frees++;
2259}
2260
2261static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2262{
2263 union u64bit temp64;
2264
2265 temp64.val32.lower = c->ErrDesc.Addr.lower;
2266 temp64.val32.upper = c->ErrDesc.Addr.upper;
2267 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2268 c->err_info, (dma_addr_t) temp64.val);
2269 pci_free_consistent(h->pdev, sizeof(*c),
2270 c, (dma_addr_t) c->busaddr);
2271}
2272
2273#ifdef CONFIG_COMPAT
2274
2275static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2276{
2277 IOCTL32_Command_struct __user *arg32 =
2278 (IOCTL32_Command_struct __user *) arg;
2279 IOCTL_Command_struct arg64;
2280 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2281 int err;
2282 u32 cp;
2283
2284 err = 0;
2285 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2286 sizeof(arg64.LUN_info));
2287 err |= copy_from_user(&arg64.Request, &arg32->Request,
2288 sizeof(arg64.Request));
2289 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2290 sizeof(arg64.error_info));
2291 err |= get_user(arg64.buf_size, &arg32->buf_size);
2292 err |= get_user(cp, &arg32->buf);
2293 arg64.buf = compat_ptr(cp);
2294 err |= copy_to_user(p, &arg64, sizeof(arg64));
2295
2296 if (err)
2297 return -EFAULT;
2298
2299 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2300 if (err)
2301 return err;
2302 err |= copy_in_user(&arg32->error_info, &p->error_info,
2303 sizeof(arg32->error_info));
2304 if (err)
2305 return -EFAULT;
2306 return err;
2307}
2308
2309static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2310 int cmd, void *arg)
2311{
2312 BIG_IOCTL32_Command_struct __user *arg32 =
2313 (BIG_IOCTL32_Command_struct __user *) arg;
2314 BIG_IOCTL_Command_struct arg64;
2315 BIG_IOCTL_Command_struct __user *p =
2316 compat_alloc_user_space(sizeof(arg64));
2317 int err;
2318 u32 cp;
2319
2320 err = 0;
2321 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2322 sizeof(arg64.LUN_info));
2323 err |= copy_from_user(&arg64.Request, &arg32->Request,
2324 sizeof(arg64.Request));
2325 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2326 sizeof(arg64.error_info));
2327 err |= get_user(arg64.buf_size, &arg32->buf_size);
2328 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2329 err |= get_user(cp, &arg32->buf);
2330 arg64.buf = compat_ptr(cp);
2331 err |= copy_to_user(p, &arg64, sizeof(arg64));
2332
2333 if (err)
2334 return -EFAULT;
2335
2336 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2337 if (err)
2338 return err;
2339 err |= copy_in_user(&arg32->error_info, &p->error_info,
2340 sizeof(arg32->error_info));
2341 if (err)
2342 return -EFAULT;
2343 return err;
2344}
2345
2346static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2347{
2348 switch (cmd) {
2349 case CCISS_GETPCIINFO:
2350 case CCISS_GETINTINFO:
2351 case CCISS_SETINTINFO:
2352 case CCISS_GETNODENAME:
2353 case CCISS_SETNODENAME:
2354 case CCISS_GETHEARTBEAT:
2355 case CCISS_GETBUSTYPES:
2356 case CCISS_GETFIRMVER:
2357 case CCISS_GETDRIVVER:
2358 case CCISS_REVALIDVOLS:
2359 case CCISS_DEREGDISK:
2360 case CCISS_REGNEWDISK:
2361 case CCISS_REGNEWD:
2362 case CCISS_RESCANDISK:
2363 case CCISS_GETLUNINFO:
2364 return hpsa_ioctl(dev, cmd, arg);
2365
2366 case CCISS_PASSTHRU32:
2367 return hpsa_ioctl32_passthru(dev, cmd, arg);
2368 case CCISS_BIG_PASSTHRU32:
2369 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2370
2371 default:
2372 return -ENOIOCTLCMD;
2373 }
2374}
2375#endif
2376
2377static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2378{
2379 struct hpsa_pci_info pciinfo;
2380
2381 if (!argp)
2382 return -EINVAL;
2383 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2384 pciinfo.bus = h->pdev->bus->number;
2385 pciinfo.dev_fn = h->pdev->devfn;
2386 pciinfo.board_id = h->board_id;
2387 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2388 return -EFAULT;
2389 return 0;
2390}
2391
2392static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2393{
2394 DriverVer_type DriverVer;
2395 unsigned char vmaj, vmin, vsubmin;
2396 int rc;
2397
2398 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2399 &vmaj, &vmin, &vsubmin);
2400 if (rc != 3) {
2401 dev_info(&h->pdev->dev, "driver version string '%s' "
2402 "unrecognized.\n", HPSA_DRIVER_VERSION);
2403 vmaj = 0;
2404 vmin = 0;
2405 vsubmin = 0;
2406 }
2407 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2408 if (!argp)
2409 return -EINVAL;
2410 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2411 return -EFAULT;
2412 return 0;
2413}
2414
2415static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2416{
2417 IOCTL_Command_struct iocommand;
2418 struct CommandList *c;
2419 char *buff = NULL;
2420 union u64bit temp64;
2421
2422 if (!argp)
2423 return -EINVAL;
2424 if (!capable(CAP_SYS_RAWIO))
2425 return -EPERM;
2426 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2427 return -EFAULT;
2428 if ((iocommand.buf_size < 1) &&
2429 (iocommand.Request.Type.Direction != XFER_NONE)) {
2430 return -EINVAL;
2431 }
2432 if (iocommand.buf_size > 0) {
2433 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2434 if (buff == NULL)
2435 return -ENOMEM;
2436 }
2437 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2438
2439 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2440 kfree(buff);
2441 return -EFAULT;
2442 }
2443 } else
2444 memset(buff, 0, iocommand.buf_size);
2445 c = cmd_special_alloc(h);
2446 if (c == NULL) {
2447 kfree(buff);
2448 return -ENOMEM;
2449 }
2450
2451 c->cmd_type = CMD_IOCTL_PEND;
2452
2453 c->Header.ReplyQueue = 0;
2454 if (iocommand.buf_size > 0) {
2455 c->Header.SGList = 1;
2456 c->Header.SGTotal = 1;
2457 } else {
2458 c->Header.SGList = 0;
2459 c->Header.SGTotal = 0;
2460 }
2461 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2462
2463 c->Header.Tag.lower = c->busaddr;
2464
2465
2466 memcpy(&c->Request, &iocommand.Request,
2467 sizeof(c->Request));
2468
2469
2470 if (iocommand.buf_size > 0) {
2471 temp64.val = pci_map_single(h->pdev, buff,
2472 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2473 c->SG[0].Addr.lower = temp64.val32.lower;
2474 c->SG[0].Addr.upper = temp64.val32.upper;
2475 c->SG[0].Len = iocommand.buf_size;
2476 c->SG[0].Ext = 0;
2477 }
2478 hpsa_scsi_do_simple_cmd_core(h, c);
2479 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2480 check_ioctl_unit_attention(h, c);
2481
2482
2483 memcpy(&iocommand.error_info, c->err_info,
2484 sizeof(iocommand.error_info));
2485 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2486 kfree(buff);
2487 cmd_special_free(h, c);
2488 return -EFAULT;
2489 }
2490
2491 if (iocommand.Request.Type.Direction == XFER_READ) {
2492
2493 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2494 kfree(buff);
2495 cmd_special_free(h, c);
2496 return -EFAULT;
2497 }
2498 }
2499 kfree(buff);
2500 cmd_special_free(h, c);
2501 return 0;
2502}
2503
2504static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2505{
2506 BIG_IOCTL_Command_struct *ioc;
2507 struct CommandList *c;
2508 unsigned char **buff = NULL;
2509 int *buff_size = NULL;
2510 union u64bit temp64;
2511 BYTE sg_used = 0;
2512 int status = 0;
2513 int i;
2514 u32 left;
2515 u32 sz;
2516 BYTE __user *data_ptr;
2517
2518 if (!argp)
2519 return -EINVAL;
2520 if (!capable(CAP_SYS_RAWIO))
2521 return -EPERM;
2522 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2524 if (!ioc) {
2525 status = -ENOMEM;
2526 goto cleanup1;
2527 }
2528 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2529 status = -EFAULT;
2530 goto cleanup1;
2531 }
2532 if ((ioc->buf_size < 1) &&
2533 (ioc->Request.Type.Direction != XFER_NONE)) {
2534 status = -EINVAL;
2535 goto cleanup1;
2536 }
2537
2538 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2539 status = -EINVAL;
2540 goto cleanup1;
2541 }
2542 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2543 status = -EINVAL;
2544 goto cleanup1;
2545 }
2546 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2547 if (!buff) {
2548 status = -ENOMEM;
2549 goto cleanup1;
2550 }
2551 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2552 if (!buff_size) {
2553 status = -ENOMEM;
2554 goto cleanup1;
2555 }
2556 left = ioc->buf_size;
2557 data_ptr = ioc->buf;
2558 while (left) {
2559 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2560 buff_size[sg_used] = sz;
2561 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2562 if (buff[sg_used] == NULL) {
2563 status = -ENOMEM;
2564 goto cleanup1;
2565 }
2566 if (ioc->Request.Type.Direction == XFER_WRITE) {
2567 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2568 status = -EFAULT;
2569 goto cleanup1;
2570 }
2571 } else
2572 memset(buff[sg_used], 0, sz);
2573 left -= sz;
2574 data_ptr += sz;
2575 sg_used++;
2576 }
2577 c = cmd_special_alloc(h);
2578 if (c == NULL) {
2579 status = -ENOMEM;
2580 goto cleanup1;
2581 }
2582 c->cmd_type = CMD_IOCTL_PEND;
2583 c->Header.ReplyQueue = 0;
2584
2585 if (ioc->buf_size > 0) {
2586 c->Header.SGList = sg_used;
2587 c->Header.SGTotal = sg_used;
2588 } else {
2589 c->Header.SGList = 0;
2590 c->Header.SGTotal = 0;
2591 }
2592 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2593 c->Header.Tag.lower = c->busaddr;
2594 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2595 if (ioc->buf_size > 0) {
2596 int i;
2597 for (i = 0; i < sg_used; i++) {
2598 temp64.val = pci_map_single(h->pdev, buff[i],
2599 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2600 c->SG[i].Addr.lower = temp64.val32.lower;
2601 c->SG[i].Addr.upper = temp64.val32.upper;
2602 c->SG[i].Len = buff_size[i];
2603
2604 c->SG[i].Ext = 0;
2605 }
2606 }
2607 hpsa_scsi_do_simple_cmd_core(h, c);
2608 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2609 check_ioctl_unit_attention(h, c);
2610
2611 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2612 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2613 cmd_special_free(h, c);
2614 status = -EFAULT;
2615 goto cleanup1;
2616 }
2617 if (ioc->Request.Type.Direction == XFER_READ) {
2618
2619 BYTE __user *ptr = ioc->buf;
2620 for (i = 0; i < sg_used; i++) {
2621 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2622 cmd_special_free(h, c);
2623 status = -EFAULT;
2624 goto cleanup1;
2625 }
2626 ptr += buff_size[i];
2627 }
2628 }
2629 cmd_special_free(h, c);
2630 status = 0;
2631cleanup1:
2632 if (buff) {
2633 for (i = 0; i < sg_used; i++)
2634 kfree(buff[i]);
2635 kfree(buff);
2636 }
2637 kfree(buff_size);
2638 kfree(ioc);
2639 return status;
2640}
2641
2642static void check_ioctl_unit_attention(struct ctlr_info *h,
2643 struct CommandList *c)
2644{
2645 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2646 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2647 (void) check_for_unit_attention(h, c);
2648}
2649
2650
2651
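/*
 * hpsa_ioctl: dispatch CCISS ioctls.  The "register new disk" style
 * commands simply kick off a rescan; the rest go to their handlers.
 */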
2652static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2653{
2654 struct ctlr_info *h;
2655 void __user *argp = (void __user *)arg;
2656
2657 h = sdev_to_hba(dev);
2658
2659 switch (cmd) {
2660 case CCISS_DEREGDISK:
2661 case CCISS_REGNEWDISK:
2662 case CCISS_REGNEWD:
2663 hpsa_scan_start(h->scsi_host);
2664 return 0;
2665 case CCISS_GETPCIINFO:
2666 return hpsa_getpciinfo_ioctl(h, argp);
2667 case CCISS_GETDRIVVER:
2668 return hpsa_getdrivver_ioctl(h, argp);
2669 case CCISS_PASSTHRU:
2670 return hpsa_passthru_ioctl(h, argp);
2671 case CCISS_BIG_PASSTHRU:
2672 return hpsa_big_passthru_ioctl(h, argp);
2673 default:
2674 return -ENOTTY;
2675 }
2676}
2677
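/*
 * fill_cmd: build a controller command.  cmd_type selects between a
 * SCSI CDB (TYPE_CMD) and a controller message (TYPE_MSG); the data
 * buffer, if any, is DMA-mapped according to the transfer direction.
 */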
2678static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2679 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2680 int cmd_type)
2681{
2682 int pci_dir = XFER_NONE;
2683
2684 c->cmd_type = CMD_IOCTL_PEND;
2685 c->Header.ReplyQueue = 0;
2686 if (buff != NULL && size > 0) {
2687 c->Header.SGList = 1;
2688 c->Header.SGTotal = 1;
2689 } else {
2690 c->Header.SGList = 0;
2691 c->Header.SGTotal = 0;
2692 }
2693 c->Header.Tag.lower = c->busaddr;
2694 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2695
2696 c->Request.Type.Type = cmd_type;
2697 if (cmd_type == TYPE_CMD) {
2698 switch (cmd) {
2699 case HPSA_INQUIRY:
2700
2701 if (page_code != 0) {
2702 c->Request.CDB[1] = 0x01;
2703 c->Request.CDB[2] = page_code;
2704 }
2705 c->Request.CDBLen = 6;
2706 c->Request.Type.Attribute = ATTR_SIMPLE;
2707 c->Request.Type.Direction = XFER_READ;
2708 c->Request.Timeout = 0;
2709 c->Request.CDB[0] = HPSA_INQUIRY;
2710 c->Request.CDB[4] = size & 0xFF;
2711 break;
2712 case HPSA_REPORT_LOG:
2713 case HPSA_REPORT_PHYS:
2714
2715
2716
2717 c->Request.CDBLen = 12;
2718 c->Request.Type.Attribute = ATTR_SIMPLE;
2719 c->Request.Type.Direction = XFER_READ;
2720 c->Request.Timeout = 0;
2721 c->Request.CDB[0] = cmd;
2722 c->Request.CDB[6] = (size >> 24) & 0xFF;
2723 c->Request.CDB[7] = (size >> 16) & 0xFF;
2724 c->Request.CDB[8] = (size >> 8) & 0xFF;
2725 c->Request.CDB[9] = size & 0xFF;
2726 break;
2727 case HPSA_CACHE_FLUSH:
2728 c->Request.CDBLen = 12;
2729 c->Request.Type.Attribute = ATTR_SIMPLE;
2730 c->Request.Type.Direction = XFER_WRITE;
2731 c->Request.Timeout = 0;
2732 c->Request.CDB[0] = BMIC_WRITE;
2733 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2734 break;
2735 case TEST_UNIT_READY:
2736 c->Request.CDBLen = 6;
2737 c->Request.Type.Attribute = ATTR_SIMPLE;
2738 c->Request.Type.Direction = XFER_NONE;
2739 c->Request.Timeout = 0;
2740 break;
2741 default:
2742 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2743 BUG();
2744 return;
2745 }
2746 } else if (cmd_type == TYPE_MSG) {
2747 switch (cmd) {
2748
2749 case HPSA_DEVICE_RESET_MSG:
2750 c->Request.CDBLen = 16;
2751 c->Request.Type.Type = 1;
2752 c->Request.Type.Attribute = ATTR_SIMPLE;
2753 c->Request.Type.Direction = XFER_NONE;
2754 c->Request.Timeout = 0;
2755 c->Request.CDB[0] = 0x01;
2756 c->Request.CDB[1] = 0x03;
2757
2758
2759 c->Request.CDB[4] = 0x00;
2760 c->Request.CDB[5] = 0x00;
2761 c->Request.CDB[6] = 0x00;
2762 c->Request.CDB[7] = 0x00;
2763 break;
2764
2765 default:
2766 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2767 cmd);
2768 BUG();
2769 }
2770 } else {
2771 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2772 BUG();
2773 }
2774
2775 switch (c->Request.Type.Direction) {
2776 case XFER_READ:
2777 pci_dir = PCI_DMA_FROMDEVICE;
2778 break;
2779 case XFER_WRITE:
2780 pci_dir = PCI_DMA_TODEVICE;
2781 break;
2782 case XFER_NONE:
2783 pci_dir = PCI_DMA_NONE;
2784 break;
2785 default:
2786 pci_dir = PCI_DMA_BIDIRECTIONAL;
2787 }
2788
2789 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2790
2791 return;
2792}
2793
2794
2795
2796
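/*
 * remap_pci_mem: ioremap() a region that need not be page aligned,
 * returning a pointer adjusted to the requested offset.
 */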
2797static void __iomem *remap_pci_mem(ulong base, ulong size)
2798{
2799 ulong page_base = ((ulong) base) & PAGE_MASK;
2800 ulong page_offs = ((ulong) base) - page_base;
2801 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2802
2803 return page_remapped ? (page_remapped + page_offs) : NULL;
2804}
2805
2806
2807
2808
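/*
 * start_io: move commands from the request queue to the hardware until
 * the FIFO fills up, placing each submitted command on the queue of
 * commands awaiting completion.
 */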
2809static void start_io(struct ctlr_info *h)
2810{
2811 struct CommandList *c;
2812
2813 while (!hlist_empty(&h->reqQ)) {
2814 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2815
2816 if (h->access.fifo_full(h)) {
2817 dev_warn(&h->pdev->dev, "fifo full\n");
2818 break;
2819 }
2820
2821
2822 removeQ(c);
2823 h->Qdepth--;
2824
2825
2826 h->access.submit_command(h, c);
2827
2828
2829 addQ(&h->cmpQ, c);
2830 }
2831}
2832
2833static inline unsigned long get_next_completion(struct ctlr_info *h)
2834{
2835 return h->access.command_completed(h);
2836}
2837
2838static inline bool interrupt_pending(struct ctlr_info *h)
2839{
2840 return h->access.intr_pending(h);
2841}
2842
2843static inline long interrupt_not_for_us(struct ctlr_info *h)
2844{
2845 return (h->access.intr_pending(h) == 0) ||
2846 (h->interrupts_enabled == 0);
2847}
2848
2849static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2850 u32 raw_tag)
2851{
2852 if (unlikely(tag_index >= h->nr_cmds)) {
2853 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2854 return 1;
2855 }
2856 return 0;
2857}
2858
2859static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2860{
2861 removeQ(c);
2862 if (likely(c->cmd_type == CMD_SCSI))
2863 complete_scsi_command(c, 0, raw_tag);
2864 else if (c->cmd_type == CMD_IOCTL_PEND)
2865 complete(c->waiting);
2866}
2867
2868static inline u32 hpsa_tag_contains_index(u32 tag)
2869{
2870#define DIRECT_LOOKUP_BIT 0x10
2871 return tag & DIRECT_LOOKUP_BIT;
2872}
2873
2874static inline u32 hpsa_tag_to_index(u32 tag)
2875{
2876#define DIRECT_LOOKUP_SHIFT 5
2877 return tag >> DIRECT_LOOKUP_SHIFT;
2878}
2879
2880static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2881{
2882#define HPSA_ERROR_BITS 0x03
2883 return tag & ~HPSA_ERROR_BITS;
2884}
2885
2886
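/* Completion of a command whose tag encodes its index in the command pool. */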
2887static inline u32 process_indexed_cmd(struct ctlr_info *h,
2888 u32 raw_tag)
2889{
2890 u32 tag_index;
2891 struct CommandList *c;
2892
2893 tag_index = hpsa_tag_to_index(raw_tag);
2894 if (bad_tag(h, tag_index, raw_tag))
2895 return next_command(h);
2896 c = h->cmd_pool + tag_index;
2897 finish_cmd(c, raw_tag);
2898 return next_command(h);
2899}
2900
2901
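/* Completion of a command found by scanning the completion queue for a
 * matching bus address.
 */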
2902static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2903 u32 raw_tag)
2904{
2905 u32 tag;
2906 struct CommandList *c = NULL;
2907 struct hlist_node *tmp;
2908
2909 tag = hpsa_tag_discard_error_bits(raw_tag);
2910 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2911 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2912 finish_cmd(c, raw_tag);
2913 return next_command(h);
2914 }
2915 }
2916 bad_tag(h, h->nr_cmds + 1, raw_tag);
2917 return next_command(h);
2918}
2919
2920static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
2921{
2922 struct ctlr_info *h = dev_id;
2923 unsigned long flags;
2924 u32 raw_tag;
2925
2926 if (interrupt_not_for_us(h))
2927 return IRQ_NONE;
2928 spin_lock_irqsave(&h->lock, flags);
2929 while (interrupt_pending(h)) {
2930 raw_tag = get_next_completion(h);
2931 while (raw_tag != FIFO_EMPTY) {
2932 if (hpsa_tag_contains_index(raw_tag))
2933 raw_tag = process_indexed_cmd(h, raw_tag);
2934 else
2935 raw_tag = process_nonindexed_cmd(h, raw_tag);
2936 }
2937 }
2938 spin_unlock_irqrestore(&h->lock, flags);
2939 return IRQ_HANDLED;
2940}
2941
2942static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
2943{
2944 struct ctlr_info *h = dev_id;
2945 unsigned long flags;
2946 u32 raw_tag;
2947
2948 spin_lock_irqsave(&h->lock, flags);
2949 raw_tag = get_next_completion(h);
2950 while (raw_tag != FIFO_EMPTY) {
2951 if (hpsa_tag_contains_index(raw_tag))
2952 raw_tag = process_indexed_cmd(h, raw_tag);
2953 else
2954 raw_tag = process_nonindexed_cmd(h, raw_tag);
2955 }
2956 spin_unlock_irqrestore(&h->lock, flags);
2957 return IRQ_HANDLED;
2958}
2959
2960
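/*
 * hpsa_message: send a single message CDB (e.g. soft reset or no-op) to
 * the controller by polled I/O, without interrupts; used by the
 * reset_devices (kdump) path before the driver is fully set up.
 */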
2961static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2962 unsigned char type)
2963{
2964 struct Command {
2965 struct CommandListHeader CommandHeader;
2966 struct RequestBlock Request;
2967 struct ErrDescriptor ErrorDescriptor;
2968 };
2969 struct Command *cmd;
2970 static const size_t cmd_sz = sizeof(*cmd) +
2971 sizeof(cmd->ErrorDescriptor);
2972 dma_addr_t paddr64;
2973 uint32_t paddr32, tag;
2974 void __iomem *vaddr;
2975 int i, err;
2976
2977 vaddr = pci_ioremap_bar(pdev, 0);
2978 if (vaddr == NULL)
2979 return -ENOMEM;
2980
2981
2982
2983
2984
2985 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2986 if (err) {
2987 iounmap(vaddr);
2988 return -ENOMEM;
2989 }
2990
2991 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2992 if (cmd == NULL) {
2993 iounmap(vaddr);
2994 return -ENOMEM;
2995 }
2996
2997
2998
2999
3000
3001 paddr32 = paddr64;
3002
3003 cmd->CommandHeader.ReplyQueue = 0;
3004 cmd->CommandHeader.SGList = 0;
3005 cmd->CommandHeader.SGTotal = 0;
3006 cmd->CommandHeader.Tag.lower = paddr32;
3007 cmd->CommandHeader.Tag.upper = 0;
3008 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3009
3010 cmd->Request.CDBLen = 16;
3011 cmd->Request.Type.Type = TYPE_MSG;
3012 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3013 cmd->Request.Type.Direction = XFER_NONE;
3014 cmd->Request.Timeout = 0;
3015 cmd->Request.CDB[0] = opcode;
3016 cmd->Request.CDB[1] = type;
3017 memset(&cmd->Request.CDB[2], 0, 14);
3018 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3019 cmd->ErrorDescriptor.Addr.upper = 0;
3020 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3021
3022 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3023
3024 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3025 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3026 if (hpsa_tag_discard_error_bits(tag) == paddr32)
3027 break;
3028 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3029 }
3030
3031 iounmap(vaddr);
3032
3033
3034
3035
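/*
 * Note: on the timeout path below the DMA buffer is deliberately not
 * freed, since the controller could still complete the command later
 * and write into it.
 */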
3036 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3037 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3038 opcode, type);
3039 return -ETIMEDOUT;
3040 }
3041
3042 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3043
3044 if (tag & HPSA_ERROR_BIT) {
3045 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3046 opcode, type);
3047 return -EIO;
3048 }
3049
3050 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3051 opcode, type);
3052 return 0;
3053}
3054
3055#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3056#define hpsa_noop(p) hpsa_message(p, 3, 0)
3057
3058static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
3059{
3060
3061#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
3062#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
3063
3064 int pos;
3065 u16 control = 0;
3066
3067 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3068 if (pos) {
3069 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3070 if (control & PCI_MSI_FLAGS_ENABLE) {
3071 dev_info(&pdev->dev, "resetting MSI\n");
3072 pci_write_config_word(pdev, msi_control_reg(pos),
3073 control & ~PCI_MSI_FLAGS_ENABLE);
3074 }
3075 }
3076
3077 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3078 if (pos) {
3079 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3080 if (control & PCI_MSIX_FLAGS_ENABLE) {
3081 dev_info(&pdev->dev, "resetting MSI-X\n");
3082 pci_write_config_word(pdev, msi_control_reg(pos),
3083 control & ~PCI_MSIX_FLAGS_ENABLE);
3084 }
3085 }
3086
3087 return 0;
3088}
3089
3090static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3091 void * __iomem vaddr, bool use_doorbell)
3092{
3093 u16 pmcsr;
3094 int pos;
3095
3096 if (use_doorbell) {
3097
3098
3099
3100
3101 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3102 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
3103 msleep(1000);
3104 } else {
3105
3106
3107
3108
3109
3110
3111
3112
3113
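/*
 * Reset via PCI power management: bounce the controller through D3hot
 * and back to D0 using its PM control register.
 */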
3114 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3115 if (pos == 0) {
3116 dev_err(&pdev->dev,
3117 "hpsa_reset_controller: "
3118 "PCI PM not supported\n");
3119 return -ENODEV;
3120 }
3121 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3122
3123 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3124 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3125 pmcsr |= PCI_D3hot;
3126 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3127
3128 msleep(500);
3129
3130
3131 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3132 pmcsr |= PCI_D0;
3133 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3134
3135 msleep(500);
3136 }
3137 return 0;
3138}
3139
3140
3141
3142
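/*
 * Kdump support: hard-reset a controller left running by the crashed
 * kernel.  PCI config space is saved, the board is reset (normally via
 * PCI power management, see below), config space is restored, and the
 * reset is treated as failed if the board comes back still in
 * performant mode.
 */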
3143static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3144{
3145 u16 saved_config_space[32];
3146 u64 cfg_offset;
3147 u32 cfg_base_addr;
3148 u64 cfg_base_addr_index;
3149 void __iomem *vaddr;
3150 unsigned long paddr;
3151 u32 misc_fw_support, active_transport;
3152 int rc, i;
3153 struct CfgTable __iomem *cfgtable;
3154 bool use_doorbell;
3155 u32 board_id;
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
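/* A few board IDs are excluded here; returning -ENOTSUPP tells the
 * caller to carry on without a reset.
 */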
3185 hpsa_lookup_board_id(pdev, &board_id);
3186 if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
3187 return -ENOTSUPP;
3188
3189 for (i = 0; i < 32; i++)
3190 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
3191
3192
3193
3194 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3195 if (rc)
3196 return rc;
3197 vaddr = remap_pci_mem(paddr, 0x250);
3198 if (!vaddr)
3199 return -ENOMEM;
3200
3201
3202 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3203 &cfg_base_addr_index, &cfg_offset);
3204 if (rc)
3205 goto unmap_vaddr;
3206 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3207 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3208 if (!cfgtable) {
3209 rc = -ENOMEM;
3210 goto unmap_vaddr;
3211 }
3212
3213
3214 misc_fw_support = readl(&cfgtable->misc_fw_support);
3215 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3216
3217
3218
3219
3220
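/*
 * The doorbell reset path is force-disabled here: use_doorbell is
 * overridden to 0 so the PCI PM method is always used, even when the
 * firmware advertises doorbell reset support.
 */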
3221 use_doorbell = 0;
3222
3223 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3224 if (rc)
3225 goto unmap_cfgtable;
3226
3227
3228
3229
3230
3231
3232
3233
3234 for (i = 0; i < 32; i++) {
3235 if (i == 2 || i == 3)
3236 continue;
3237 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
3238 }
3239 wmb();
3240 pci_write_config_word(pdev, 4, saved_config_space[2]);
3241
3242
3243
3244 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3245
3246
3247
3248
3249
3250
3251
3252 active_transport = readl(&cfgtable->TransportActive);
3253 if (active_transport & PERFORMANT_MODE) {
3254 dev_warn(&pdev->dev, "Unable to successfully reset controller,"
3255 " proceeding anyway.\n");
3256 rc = -ENOTSUPP;
3257 }
3258
3259unmap_cfgtable:
3260 iounmap(cfgtable);
3261
3262unmap_vaddr:
3263 iounmap(vaddr);
3264 return rc;
3265}
3266
3267
3268
3269
3270
3271
3272static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3273{
3274#ifdef HPSA_DEBUG
3275 int i;
3276 char temp_name[17];
3277
3278 dev_info(dev, "Controller Configuration information\n");
3279 dev_info(dev, "------------------------------------\n");
3280 for (i = 0; i < 4; i++)
3281 temp_name[i] = readb(&(tb->Signature[i]));
3282 temp_name[4] = '\0';
3283 dev_info(dev, " Signature = %s\n", temp_name);
3284 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3285 dev_info(dev, " Transport methods supported = 0x%x\n",
3286 readl(&(tb->TransportSupport)));
3287 dev_info(dev, " Transport methods active = 0x%x\n",
3288 readl(&(tb->TransportActive)));
3289 dev_info(dev, " Requested transport Method = 0x%x\n",
3290 readl(&(tb->HostWrite.TransportRequest)));
3291 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3292 readl(&(tb->HostWrite.CoalIntDelay)));
3293 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3294 readl(&(tb->HostWrite.CoalIntCount)));
3295 dev_info(dev, " Max outstanding commands = %d\n",
3296 readl(&(tb->CmdsOutMax)));
3297 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3298 for (i = 0; i < 16; i++)
3299 temp_name[i] = readb(&(tb->ServerName[i]));
3300 temp_name[16] = '\0';
3301 dev_info(dev, " Server Name = %s\n", temp_name);
3302 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3303 readl(&(tb->HeartBeat)));
3304#endif
3305}
3306
3307static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3308{
3309 int i, offset, mem_type, bar_type;
3310
3311 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
3312 return 0;
3313 offset = 0;
3314 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3315 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3316 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3317 offset += 4;
3318 else {
3319 mem_type = pci_resource_flags(pdev, i) &
3320 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3321 switch (mem_type) {
3322 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3323 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3324 offset += 4;
3325 break;
3326 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3327 offset += 8;
3328 break;
3329 default:
3330 dev_warn(&pdev->dev,
3331 "base address is invalid\n");
3332 return -1;
3333 break;
3334 }
3335 }
3336 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3337 return i + 1;
3338 }
3339 return -1;
3340}
3341
3342
3343
3344
3345
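/*
 * Pick the interrupt mode: try MSI-X (four vectors), then MSI, and fall
 * back to the plain INTx line.  A few older boards are excluded and
 * always use INTx.
 */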
3346static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3347{
3348#ifdef CONFIG_PCI_MSI
3349 int err;
3350 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3351 {0, 2}, {0, 3}
3352 };
3353
3354
3355 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3356 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3357 goto default_int_mode;
3358 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3359 dev_info(&h->pdev->dev, "MSIX\n");
3360 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3361 if (!err) {
3362 h->intr[0] = hpsa_msix_entries[0].vector;
3363 h->intr[1] = hpsa_msix_entries[1].vector;
3364 h->intr[2] = hpsa_msix_entries[2].vector;
3365 h->intr[3] = hpsa_msix_entries[3].vector;
3366 h->msix_vector = 1;
3367 return;
3368 }
3369 if (err > 0) {
3370 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3371 "available\n", err);
3372 goto default_int_mode;
3373 } else {
3374 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3375 err);
3376 goto default_int_mode;
3377 }
3378 }
3379 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3380 dev_info(&h->pdev->dev, "MSI\n");
3381 if (!pci_enable_msi(h->pdev))
3382 h->msi_vector = 1;
3383 else
3384 dev_warn(&h->pdev->dev, "MSI init failed\n");
3385 }
3386default_int_mode:
3387#endif
3388
3389 h->intr[PERF_MODE_INT] = h->pdev->irq;
3390}
3391
3392static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3393{
3394 int i;
3395 u32 subsystem_vendor_id, subsystem_device_id;
3396
3397 subsystem_vendor_id = pdev->subsystem_vendor;
3398 subsystem_device_id = pdev->subsystem_device;
3399 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3400 subsystem_vendor_id;
3401
3402 for (i = 0; i < ARRAY_SIZE(products); i++)
3403 if (*board_id == products[i].board_id)
3404 return i;
3405
3406 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3407 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3408 !hpsa_allow_any) {
3409 dev_warn(&pdev->dev, "unrecognized board ID: "
3410 "0x%08x, ignoring.\n", *board_id);
3411 return -ENODEV;
3412 }
3413 return ARRAY_SIZE(products) - 1;
3414}
3415
3416static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3417{
3418 u16 command;
3419
3420 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3421 return ((command & PCI_COMMAND_MEMORY) == 0);
3422}
3423
3424static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3425 unsigned long *memory_bar)
3426{
3427 int i;
3428
3429 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3430 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3431
3432 *memory_bar = pci_resource_start(pdev, i);
3433 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3434 *memory_bar);
3435 return 0;
3436 }
3437 dev_warn(&pdev->dev, "no memory BAR found\n");
3438 return -ENODEV;
3439}
3440
3441static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
3442{
3443 int i;
3444 u32 scratchpad;
3445
3446 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3447 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3448 if (scratchpad == HPSA_FIRMWARE_READY)
3449 return 0;
3450 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3451 }
3452 dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
3453 return -ENODEV;
3454}
3455
3456static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3457 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3458 u64 *cfg_offset)
3459{
3460 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3461 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3462 *cfg_base_addr &= (u32) 0x0000ffff;
3463 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3464 if (*cfg_base_addr_index == -1) {
3465 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3466 return -ENODEV;
3467 }
3468 return 0;
3469}
3470
3471static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3472{
3473 u64 cfg_offset;
3474 u32 cfg_base_addr;
3475 u64 cfg_base_addr_index;
3476 u32 trans_offset;
3477 int rc;
3478
3479 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3480 &cfg_base_addr_index, &cfg_offset);
3481 if (rc)
3482 return rc;
3483 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3484 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3485 if (!h->cfgtable)
3486 return -ENOMEM;
3487
3488 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3489 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3490 cfg_base_addr_index)+cfg_offset+trans_offset,
3491 sizeof(*h->transtable));
3492 if (!h->transtable)
3493 return -ENOMEM;
3494 return 0;
3495}
3496
3497static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3498{
3499 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3500 if (h->max_commands < 16) {
3501 dev_warn(&h->pdev->dev, "Controller reports "
3502 "max supported commands of %d, an obvious lie. "
3503 "Using 16. Ensure that firmware is up to date.\n",
3504 h->max_commands);
3505 h->max_commands = 16;
3506 }
3507}
3508
3509
3510
3511
3512
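/*
 * Query controller limits from the config table: number of outstanding
 * commands and scatter-gather limits, deciding whether chained SG
 * blocks are needed.
 */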
3513static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3514{
3515 hpsa_get_max_perf_mode_cmds(h);
3516 h->nr_cmds = h->max_commands - 4;
3517 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3518
3519
3520
3521
3522 h->max_cmd_sg_entries = 31;
3523 if (h->maxsgentries > 512) {
3524 h->max_cmd_sg_entries = 32;
3525 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3526 h->maxsgentries--;
3527 } else {
3528 h->maxsgentries = 31;
3529 h->chainsize = 0;
3530 }
3531}
3532
3533static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3534{
3535 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3536 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3537 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3538 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3539 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3540 return false;
3541 }
3542 return true;
3543}
3544
3545
3546static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3547{
3548#ifdef CONFIG_X86
3549 u32 prefetch;
3550
3551 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3552 prefetch |= 0x100;
3553 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3554#endif
3555}
3556
3557
3558
3559
3560static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3561{
3562 u32 dma_prefetch;
3563
3564 if (h->board_id != 0x3225103C)
3565 return;
3566 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3567 dma_prefetch |= 0x8000;
3568 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3569}
3570
3571static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3572{
3573 int i;
3574
3575
3576
3577
3578
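/*
 * Wait (10 ms at a time, up to MAX_CONFIG_WAIT iterations) for the
 * controller to clear the doorbell change-request bit, showing it has
 * noticed the new transport request.
 */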
3579 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3580 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3581 break;
3582
3583 msleep(10);
3584 }
3585}
3586
3587static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3588{
3589 u32 trans_support;
3590
3591 trans_support = readl(&(h->cfgtable->TransportSupport));
3592 if (!(trans_support & SIMPLE_MODE))
3593 return -ENOTSUPP;
3594
3595 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3596
3597 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3598 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3599 hpsa_wait_for_mode_change_ack(h);
3600 print_cfg_table(&h->pdev->dev, h->cfgtable);
3601 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3602 dev_warn(&h->pdev->dev,
3603 "unable to get board into simple mode\n");
3604 return -ENODEV;
3605 }
3606 return 0;
3607}
3608
3609static int __devinit hpsa_pci_init(struct ctlr_info *h)
3610{
3611 int prod_index, err;
3612
3613 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3614 if (prod_index < 0)
3615 return -ENODEV;
3616 h->product_name = products[prod_index].product_name;
3617 h->access = *(products[prod_index].access);
3618
3619 if (hpsa_board_disabled(h->pdev)) {
3620 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3621 return -ENODEV;
3622 }
3623 err = pci_enable_device(h->pdev);
3624 if (err) {
3625 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3626 return err;
3627 }
3628
3629 err = pci_request_regions(h->pdev, "hpsa");
3630 if (err) {
3631 dev_err(&h->pdev->dev,
3632 "cannot obtain PCI resources, aborting\n");
3633 return err;
3634 }
3635 hpsa_interrupt_mode(h);
3636 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3637 if (err)
3638 goto err_out_free_res;
3639 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3640 if (!h->vaddr) {
3641 err = -ENOMEM;
3642 goto err_out_free_res;
3643 }
3644 err = hpsa_wait_for_board_ready(h);
3645 if (err)
3646 goto err_out_free_res;
3647 err = hpsa_find_cfgtables(h);
3648 if (err)
3649 goto err_out_free_res;
3650 hpsa_find_board_params(h);
3651
3652 if (!hpsa_CISS_signature_present(h)) {
3653 err = -ENODEV;
3654 goto err_out_free_res;
3655 }
3656 hpsa_enable_scsi_prefetch(h);
3657 hpsa_p600_dma_prefetch_quirk(h);
3658 err = hpsa_enter_simple_mode(h);
3659 if (err)
3660 goto err_out_free_res;
3661 return 0;
3662
3663err_out_free_res:
3664 if (h->transtable)
3665 iounmap(h->transtable);
3666 if (h->cfgtable)
3667 iounmap(h->cfgtable);
3668 if (h->vaddr)
3669 iounmap(h->vaddr);
3670
3671
3672
3673
3674 pci_release_regions(h->pdev);
3675 return err;
3676}
3677
3678static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3679{
3680 int rc;
3681
3682#define HBA_INQUIRY_BYTE_COUNT 64
3683 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3684 if (!h->hba_inquiry_data)
3685 return;
3686 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3687 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3688 if (rc != 0) {
3689 kfree(h->hba_inquiry_data);
3690 h->hba_inquiry_data = NULL;
3691 }
3692}
3693
3694static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3695{
3696 int rc, i;
3697
3698 if (!reset_devices)
3699 return 0;
3700
3701
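/*
 * Reset the controller.  A return of -ENOTSUPP means this board cannot
 * be reset this way but is otherwise usable, so we carry on without it.
 */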
3702 rc = hpsa_kdump_hard_reset_controller(pdev);
3703
3704
3705
3706
3707
3708
3709 if (rc == -ENOTSUPP)
3710 return 0;
3711 if (rc)
3712 return -ENODEV;
3713 if (hpsa_reset_msi(pdev))
3714 return -ENODEV;
3715
3716
3717 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3718 if (hpsa_noop(pdev) == 0)
3719 break;
3720 else
3721 dev_warn(&pdev->dev, "no-op failed%s\n",
3722 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
3723 }
3724 return 0;
3725}
3726
3727static int __devinit hpsa_init_one(struct pci_dev *pdev,
3728 const struct pci_device_id *ent)
3729{
3730 int dac, rc;
3731 struct ctlr_info *h;
3732
3733 if (number_of_controllers == 0)
3734 printk(KERN_INFO DRIVER_NAME "\n");
3735
3736 rc = hpsa_init_reset_devices(pdev);
3737 if (rc)
3738 return rc;
3739
3740
3741
3742
3743
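/*
 * struct CommandList must be 32-byte aligned so that the low bits of a
 * command's bus address are zero; they are reused for the tag and
 * direct-lookup flags (see DIRECT_LOOKUP_SHIFT).
 */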
3744#define COMMANDLIST_ALIGNMENT 32
3745 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3746 h = kzalloc(sizeof(*h), GFP_KERNEL);
3747 if (!h)
3748 return -ENOMEM;
3749
3750 h->pdev = pdev;
3751 h->busy_initializing = 1;
3752 INIT_HLIST_HEAD(&h->cmpQ);
3753 INIT_HLIST_HEAD(&h->reqQ);
3754 rc = hpsa_pci_init(h);
3755 if (rc != 0)
3756 goto clean1;
3757
3758 sprintf(h->devname, "hpsa%d", number_of_controllers);
3759 h->ctlr = number_of_controllers;
3760 number_of_controllers++;
3761
3762
3763 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3764 if (rc == 0) {
3765 dac = 1;
3766 } else {
3767 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3768 if (rc == 0) {
3769 dac = 0;
3770 } else {
3771 dev_err(&pdev->dev, "no suitable DMA available\n");
3772 goto clean1;
3773 }
3774 }
3775
3776
3777 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3778
3779 if (h->msix_vector || h->msi_vector)
3780 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
3781 IRQF_DISABLED, h->devname, h);
3782 else
3783 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
3784 IRQF_DISABLED, h->devname, h);
3785 if (rc) {
3786 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3787 h->intr[PERF_MODE_INT], h->devname);
3788 goto clean2;
3789 }
3790
3791 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3792 h->devname, pdev->device,
3793 h->intr[PERF_MODE_INT], dac ? "" : " not");
3794
3795 h->cmd_pool_bits =
3796 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3797 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3798 h->cmd_pool = pci_alloc_consistent(h->pdev,
3799 h->nr_cmds * sizeof(*h->cmd_pool),
3800 &(h->cmd_pool_dhandle));
3801 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3802 h->nr_cmds * sizeof(*h->errinfo_pool),
3803 &(h->errinfo_pool_dhandle));
3804 if ((h->cmd_pool_bits == NULL)
3805 || (h->cmd_pool == NULL)
3806 || (h->errinfo_pool == NULL)) {
3807 dev_err(&pdev->dev, "out of memory");
3808 rc = -ENOMEM;
3809 goto clean4;
3810 }
3811 if (hpsa_allocate_sg_chain_blocks(h))
3812 goto clean4;
3813 spin_lock_init(&h->lock);
3814 spin_lock_init(&h->scan_lock);
3815 init_waitqueue_head(&h->scan_wait_queue);
3816 h->scan_finished = 1;
3817
3818 pci_set_drvdata(pdev, h);
3819 memset(h->cmd_pool_bits, 0,
3820 ((h->nr_cmds + BITS_PER_LONG -
3821 1) / BITS_PER_LONG) * sizeof(unsigned long));
3822
3823 hpsa_scsi_setup(h);
3824
3825
3826 h->access.set_intr_mask(h, HPSA_INTR_ON);
3827
3828 hpsa_put_ctlr_into_performant_mode(h);
3829 hpsa_hba_inquiry(h);
3830 hpsa_register_scsi(h);
3831 h->busy_initializing = 0;
3832 return 1;
3833
3834clean4:
3835 hpsa_free_sg_chain_blocks(h);
3836 kfree(h->cmd_pool_bits);
3837 if (h->cmd_pool)
3838 pci_free_consistent(h->pdev,
3839 h->nr_cmds * sizeof(struct CommandList),
3840 h->cmd_pool, h->cmd_pool_dhandle);
3841 if (h->errinfo_pool)
3842 pci_free_consistent(h->pdev,
3843 h->nr_cmds * sizeof(struct ErrorInfo),
3844 h->errinfo_pool,
3845 h->errinfo_pool_dhandle);
3846 free_irq(h->intr[PERF_MODE_INT], h);
3847clean2:
3848clean1:
3849 h->busy_initializing = 0;
3850 kfree(h);
3851 return rc;
3852}
3853
3854static void hpsa_flush_cache(struct ctlr_info *h)
3855{
3856 char *flush_buf;
3857 struct CommandList *c;
3858
3859 flush_buf = kzalloc(4, GFP_KERNEL);
3860 if (!flush_buf)
3861 return;
3862
3863 c = cmd_special_alloc(h);
3864 if (!c) {
3865 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3866 goto out_of_memory;
3867 }
3868 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3869 RAID_CTLR_LUNID, TYPE_CMD);
3870 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3871 if (c->err_info->CommandStatus != 0)
3872 dev_warn(&h->pdev->dev,
3873 "error flushing cache on controller\n");
3874 cmd_special_free(h, c);
3875out_of_memory:
3876 kfree(flush_buf);
3877}
3878
3879static void hpsa_shutdown(struct pci_dev *pdev)
3880{
3881 struct ctlr_info *h;
3882
3883 h = pci_get_drvdata(pdev);
3884
3885
3886
3887
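/*
 * Flush the controller's write cache so dirty data reaches the disks,
 * then mask and free the interrupt before the system goes down.
 */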
3888 hpsa_flush_cache(h);
3889 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3890 free_irq(h->intr[PERF_MODE_INT], h);
3891#ifdef CONFIG_PCI_MSI
3892 if (h->msix_vector)
3893 pci_disable_msix(h->pdev);
3894 else if (h->msi_vector)
3895 pci_disable_msi(h->pdev);
3896#endif
3897}
3898
3899static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3900{
3901 struct ctlr_info *h;
3902
3903 if (pci_get_drvdata(pdev) == NULL) {
3904 dev_err(&pdev->dev, "unable to remove device\n");
3905 return;
3906 }
3907 h = pci_get_drvdata(pdev);
3908 hpsa_unregister_scsi(h);
3909 hpsa_shutdown(pdev);
3910 iounmap(h->vaddr);
3911 iounmap(h->transtable);
3912 iounmap(h->cfgtable);
3913 hpsa_free_sg_chain_blocks(h);
3914 pci_free_consistent(h->pdev,
3915 h->nr_cmds * sizeof(struct CommandList),
3916 h->cmd_pool, h->cmd_pool_dhandle);
3917 pci_free_consistent(h->pdev,
3918 h->nr_cmds * sizeof(struct ErrorInfo),
3919 h->errinfo_pool, h->errinfo_pool_dhandle);
3920 pci_free_consistent(h->pdev, h->reply_pool_size,
3921 h->reply_pool, h->reply_pool_dhandle);
3922 kfree(h->cmd_pool_bits);
3923 kfree(h->blockFetchTable);
3924 kfree(h->hba_inquiry_data);
3925
3926
3927
3928
3929 pci_release_regions(pdev);
3930 pci_set_drvdata(pdev, NULL);
3931 kfree(h);
3932}
3933
3934static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3935 __attribute__((unused)) pm_message_t state)
3936{
3937 return -ENOSYS;
3938}
3939
3940static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3941{
3942 return -ENOSYS;
3943}
3944
3945static struct pci_driver hpsa_pci_driver = {
3946 .name = "hpsa",
3947 .probe = hpsa_init_one,
3948 .remove = __devexit_p(hpsa_remove_one),
3949 .id_table = hpsa_pci_device_id,
3950 .shutdown = hpsa_shutdown,
3951 .suspend = hpsa_suspend,
3952 .resume = hpsa_resume,
3953};
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
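/*
 * calc_bucket_map: for each possible SG count 0..nsgs, find the
 * smallest bucket in bucket[] large enough for a command of that size
 * (MINIMUM_TRANSFER_BLOCKS plus one block per SG entry) and store its
 * index in bucket_map.
 */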
3967static void calc_bucket_map(int bucket[], int num_buckets,
3968 int nsgs, int *bucket_map)
3969{
3970 int i, j, b, size;
3971
3972
3973#define MINIMUM_TRANSFER_BLOCKS 4
3974#define NUM_BUCKETS 8
3975
3976 for (i = 0; i <= nsgs; i++) {
3977
3978 size = i + MINIMUM_TRANSFER_BLOCKS;
3979 b = num_buckets;
3980
3981 for (j = 0; j < 8; j++) {
3982 if (bucket[j] >= size) {
3983 b = j;
3984 break;
3985 }
3986 }
3987
3988 bucket_map[i] = b;
3989 }
3990}
3991
3992static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
3993{
3994 int i;
3995 unsigned long register_value;
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
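/*
 * Block-fetch table: eight candidate command sizes the controller can
 * be told to fetch; the last entry is sized for a command carrying the
 * maximum number of SG entries.
 */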
4014 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
4015 BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
4016
4017
4018
4019
4020
4021
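/*
 * Initialize the performant-mode reply queue: the wraparound toggle
 * starts at 1, the ring is zeroed, and the head pointer is set to the
 * start of the pool.
 */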
4022 h->reply_pool_wraparound = 1;
4023
4024
4025 memset(h->reply_pool, 0, h->reply_pool_size);
4026 h->reply_pool_head = h->reply_pool;
4027
4028 bft[7] = h->max_sg_entries + 4;
4029 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
4030 for (i = 0; i < 8; i++)
4031 writel(bft[i], &h->transtable->BlockFetch[i]);
4032
4033
4034 writel(h->max_commands, &h->transtable->RepQSize);
4035 writel(1, &h->transtable->RepQCount);
4036 writel(0, &h->transtable->RepQCtrAddrLow32);
4037 writel(0, &h->transtable->RepQCtrAddrHigh32);
4038 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
4039 writel(0, &h->transtable->RepQAddr0High32);
4040 writel(CFGTBL_Trans_Performant,
4041 &(h->cfgtable->HostWrite.TransportRequest));
4042 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4043 hpsa_wait_for_mode_change_ack(h);
4044 register_value = readl(&(h->cfgtable->TransportActive));
4045 if (!(register_value & CFGTBL_Trans_Performant)) {
4046 dev_warn(&h->pdev->dev, "unable to get board into"
4047 " performant mode\n");
4048 return;
4049 }
4050}
4051
4052static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4053{
4054 u32 trans_support;
4055
4056 trans_support = readl(&(h->cfgtable->TransportSupport));
4057 if (!(trans_support & PERFORMANT_MODE))
4058 return;
4059
4060 hpsa_get_max_perf_mode_cmds(h);
4061 h->max_sg_entries = 32;
4062
4063 h->reply_pool_size = h->max_commands * sizeof(u64);
4064 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4065 &(h->reply_pool_dhandle));
4066
4067
4068 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
4069 sizeof(u32)), GFP_KERNEL);
4070
4071 if ((h->reply_pool == NULL)
4072 || (h->blockFetchTable == NULL))
4073 goto clean_up;
4074
4075 hpsa_enter_performant_mode(h);
4076
4077
4078 h->access = SA5_performant_access;
4079 h->transMethod = CFGTBL_Trans_Performant;
4080
4081 return;
4082
4083clean_up:
4084 if (h->reply_pool)
4085 pci_free_consistent(h->pdev, h->reply_pool_size,
4086 h->reply_pool, h->reply_pool_dhandle);
4087 kfree(h->blockFetchTable);
4088}
4089
4090
4091
4092
4093
4094static int __init hpsa_init(void)
4095{
4096 return pci_register_driver(&hpsa_pci_driver);
4097}
4098
4099static void __exit hpsa_cleanup(void)
4100{
4101 pci_unregister_driver(&hpsa_pci_driver);
4102}
4103
4104module_init(hpsa_init);
4105module_exit(hpsa_cleanup);
4106