1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/pci-aspm.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/fs.h>
31#include <linux/timer.h>
32#include <linux/seq_file.h>
33#include <linux/init.h>
34#include <linux/spinlock.h>
35#include <linux/compat.h>
36#include <linux/blktrace_api.h>
37#include <linux/uaccess.h>
38#include <linux/io.h>
39#include <linux/dma-mapping.h>
40#include <linux/completion.h>
41#include <linux/moduleparam.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
47#include <linux/cciss_ioctl.h>
48#include <linux/string.h>
49#include <linux/bitmap.h>
50#include <linux/atomic.h>
51#include <linux/kthread.h>
52#include <linux/jiffies.h>
53#include "hpsa_cmd.h"
54#include "hpsa.h"
55
56
57#define HPSA_DRIVER_VERSION "2.0.2-1"
58#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
59#define HPSA "hpsa"
60
61
62#define MAX_CONFIG_WAIT 30000
63#define MAX_IOCTL_CONFIG_WAIT 1000
64
65
66#define MAX_CMD_RETRIES 3
67
68
69MODULE_AUTHOR("Hewlett-Packard Company");
70MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
71 HPSA_DRIVER_VERSION);
72MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
73MODULE_VERSION(HPSA_DRIVER_VERSION);
74MODULE_LICENSE("GPL");
75
76static int hpsa_allow_any;
77module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
78MODULE_PARM_DESC(hpsa_allow_any,
79 "Allow hpsa driver to access unknown HP Smart Array hardware");
80static int hpsa_simple_mode;
81module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(hpsa_simple_mode,
83 "Use 'simple mode' rather than 'performant mode'");
84
85
86static const struct pci_device_id hpsa_pci_device_id[] = {
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
111 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
112 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
113 {0,}
114};
115
116MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
117
118
119
120
121
122static struct board_type products[] = {
123 {0x3241103C, "Smart Array P212", &SA5_access},
124 {0x3243103C, "Smart Array P410", &SA5_access},
125 {0x3245103C, "Smart Array P410i", &SA5_access},
126 {0x3247103C, "Smart Array P411", &SA5_access},
127 {0x3249103C, "Smart Array P812", &SA5_access},
128 {0x324a103C, "Smart Array P712m", &SA5_access},
129 {0x324b103C, "Smart Array P711m", &SA5_access},
130 {0x3350103C, "Smart Array P222", &SA5_access},
131 {0x3351103C, "Smart Array P420", &SA5_access},
132 {0x3352103C, "Smart Array P421", &SA5_access},
133 {0x3353103C, "Smart Array P822", &SA5_access},
134 {0x3354103C, "Smart Array P420i", &SA5_access},
135 {0x3355103C, "Smart Array P220i", &SA5_access},
136 {0x3356103C, "Smart Array P721m", &SA5_access},
137 {0x1920103C, "Smart Array", &SA5_access},
138 {0x1921103C, "Smart Array", &SA5_access},
139 {0x1922103C, "Smart Array", &SA5_access},
140 {0x1923103C, "Smart Array", &SA5_access},
141 {0x1924103C, "Smart Array", &SA5_access},
142 {0x1925103C, "Smart Array", &SA5_access},
143 {0x1926103C, "Smart Array", &SA5_access},
144 {0x1928103C, "Smart Array", &SA5_access},
145 {0x334d103C, "Smart Array P822se", &SA5_access},
146 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
147};
148
149static int number_of_controllers;
150
151static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
152static spinlock_t lockup_detector_lock;
153static struct task_struct *hpsa_lockup_detector;
154
155static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
156static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
157static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
158static void start_io(struct ctlr_info *h);
159
160#ifdef CONFIG_COMPAT
161static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
162#endif
163
164static void cmd_free(struct ctlr_info *h, struct CommandList *c);
165static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
166static struct CommandList *cmd_alloc(struct ctlr_info *h);
167static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
168static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
169 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
170 int cmd_type);
171
172static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
173static void hpsa_scan_start(struct Scsi_Host *);
174static int hpsa_scan_finished(struct Scsi_Host *sh,
175 unsigned long elapsed_time);
176static int hpsa_change_queue_depth(struct scsi_device *sdev,
177 int qdepth, int reason);
178
179static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
180static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
181static int hpsa_slave_alloc(struct scsi_device *sdev);
182static void hpsa_slave_destroy(struct scsi_device *sdev);
183
184static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
185static int check_for_unit_attention(struct ctlr_info *h,
186 struct CommandList *c);
187static void check_ioctl_unit_attention(struct ctlr_info *h,
188 struct CommandList *c);
189
190static void calc_bucket_map(int *bucket, int num_buckets,
191 int nsgs, int *bucket_map);
192static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
193static inline u32 next_command(struct ctlr_info *h, u8 q);
194static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
195 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
196 u64 *cfg_offset);
197static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
198 unsigned long *memory_bar);
199static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
200static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
201 int wait_for_ready);
202static inline void finish_cmd(struct CommandList *c);
203#define BOARD_NOT_READY 0
204#define BOARD_READY 1
205
206static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
207{
208 unsigned long *priv = shost_priv(sdev->host);
209 return (struct ctlr_info *) *priv;
210}
211
212static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
213{
214 unsigned long *priv = shost_priv(sh);
215 return (struct ctlr_info *) *priv;
216}
217
218static int check_for_unit_attention(struct ctlr_info *h,
219 struct CommandList *c)
220{
221 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
222 return 0;
223
224 switch (c->err_info->SenseInfo[12]) {
225 case STATE_CHANGED:
226 dev_warn(&h->pdev->dev, HPSA "%d: a state change "
227 "detected, command retried\n", h->ctlr);
228 break;
229 case LUN_FAILED:
230 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
231 "detected, action required\n", h->ctlr);
232 break;
233 case REPORT_LUNS_CHANGED:
234 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
235 "changed, action required\n", h->ctlr);
236
237
238
239
240 break;
241 case POWER_OR_RESET:
242 dev_warn(&h->pdev->dev, HPSA "%d: a power on "
243 "or device reset detected\n", h->ctlr);
244 break;
245 case UNIT_ATTENTION_CLEARED:
246 dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
247 "cleared by another initiator\n", h->ctlr);
248 break;
249 default:
250 dev_warn(&h->pdev->dev, HPSA "%d: unknown "
251 "unit attention detected\n", h->ctlr);
252 break;
253 }
254 return 1;
255}
256
257static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
258{
259 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
260 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
261 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
262 return 0;
263 dev_warn(&h->pdev->dev, HPSA "device busy");
264 return 1;
265}
266
267static ssize_t host_store_rescan(struct device *dev,
268 struct device_attribute *attr,
269 const char *buf, size_t count)
270{
271 struct ctlr_info *h;
272 struct Scsi_Host *shost = class_to_shost(dev);
273 h = shost_to_hba(shost);
274 hpsa_scan_start(h->scsi_host);
275 return count;
276}
277
278static ssize_t host_show_firmware_revision(struct device *dev,
279 struct device_attribute *attr, char *buf)
280{
281 struct ctlr_info *h;
282 struct Scsi_Host *shost = class_to_shost(dev);
283 unsigned char *fwrev;
284
285 h = shost_to_hba(shost);
286 if (!h->hba_inquiry_data)
287 return 0;
288 fwrev = &h->hba_inquiry_data[32];
289 return snprintf(buf, 20, "%c%c%c%c\n",
290 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
291}
292
293static ssize_t host_show_commands_outstanding(struct device *dev,
294 struct device_attribute *attr, char *buf)
295{
296 struct Scsi_Host *shost = class_to_shost(dev);
297 struct ctlr_info *h = shost_to_hba(shost);
298
299 return snprintf(buf, 20, "%d\n", h->commands_outstanding);
300}
301
302static ssize_t host_show_transport_mode(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 struct ctlr_info *h;
306 struct Scsi_Host *shost = class_to_shost(dev);
307
308 h = shost_to_hba(shost);
309 return snprintf(buf, 20, "%s\n",
310 h->transMethod & CFGTBL_Trans_Performant ?
311 "performant" : "simple");
312}
313
314
315static u32 unresettable_controller[] = {
316 0x324a103C,
317 0x324b103C,
318 0x3223103C,
319 0x3234103C,
320 0x3235103C,
321 0x3211103C,
322 0x3212103C,
323 0x3213103C,
324 0x3214103C,
325 0x3215103C,
326 0x3237103C,
327 0x323D103C,
328 0x40800E11,
329 0x409C0E11,
330 0x409D0E11,
331 0x40700E11,
332 0x40820E11,
333 0x40830E11,
334 0x409A0E11,
335 0x409B0E11,
336 0x40910E11,
337};
338
339
340static u32 soft_unresettable_controller[] = {
341 0x40800E11,
342 0x40700E11,
343 0x40820E11,
344 0x40830E11,
345 0x409A0E11,
346 0x409B0E11,
347 0x40910E11,
348
349
350
351
352
353
354
355 0x409C0E11,
356 0x409D0E11,
357};
358
359static int ctlr_is_hard_resettable(u32 board_id)
360{
361 int i;
362
363 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
364 if (unresettable_controller[i] == board_id)
365 return 0;
366 return 1;
367}
368
369static int ctlr_is_soft_resettable(u32 board_id)
370{
371 int i;
372
373 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
374 if (soft_unresettable_controller[i] == board_id)
375 return 0;
376 return 1;
377}
378
379static int ctlr_is_resettable(u32 board_id)
380{
381 return ctlr_is_hard_resettable(board_id) ||
382 ctlr_is_soft_resettable(board_id);
383}
384
385static ssize_t host_show_resettable(struct device *dev,
386 struct device_attribute *attr, char *buf)
387{
388 struct ctlr_info *h;
389 struct Scsi_Host *shost = class_to_shost(dev);
390
391 h = shost_to_hba(shost);
392 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
393}
394
395static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
396{
397 return (scsi3addr[3] & 0xC0) == 0x40;
398}
399
400static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
401 "1(ADM)", "UNKNOWN"
402};
403#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
404
405static ssize_t raid_level_show(struct device *dev,
406 struct device_attribute *attr, char *buf)
407{
408 ssize_t l = 0;
409 unsigned char rlevel;
410 struct ctlr_info *h;
411 struct scsi_device *sdev;
412 struct hpsa_scsi_dev_t *hdev;
413 unsigned long flags;
414
415 sdev = to_scsi_device(dev);
416 h = sdev_to_hba(sdev);
417 spin_lock_irqsave(&h->lock, flags);
418 hdev = sdev->hostdata;
419 if (!hdev) {
420 spin_unlock_irqrestore(&h->lock, flags);
421 return -ENODEV;
422 }
423
424
425 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
426 spin_unlock_irqrestore(&h->lock, flags);
427 l = snprintf(buf, PAGE_SIZE, "N/A\n");
428 return l;
429 }
430
431 rlevel = hdev->raid_level;
432 spin_unlock_irqrestore(&h->lock, flags);
433 if (rlevel > RAID_UNKNOWN)
434 rlevel = RAID_UNKNOWN;
435 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
436 return l;
437}
438
439static ssize_t lunid_show(struct device *dev,
440 struct device_attribute *attr, char *buf)
441{
442 struct ctlr_info *h;
443 struct scsi_device *sdev;
444 struct hpsa_scsi_dev_t *hdev;
445 unsigned long flags;
446 unsigned char lunid[8];
447
448 sdev = to_scsi_device(dev);
449 h = sdev_to_hba(sdev);
450 spin_lock_irqsave(&h->lock, flags);
451 hdev = sdev->hostdata;
452 if (!hdev) {
453 spin_unlock_irqrestore(&h->lock, flags);
454 return -ENODEV;
455 }
456 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
457 spin_unlock_irqrestore(&h->lock, flags);
458 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
459 lunid[0], lunid[1], lunid[2], lunid[3],
460 lunid[4], lunid[5], lunid[6], lunid[7]);
461}
462
463static ssize_t unique_id_show(struct device *dev,
464 struct device_attribute *attr, char *buf)
465{
466 struct ctlr_info *h;
467 struct scsi_device *sdev;
468 struct hpsa_scsi_dev_t *hdev;
469 unsigned long flags;
470 unsigned char sn[16];
471
472 sdev = to_scsi_device(dev);
473 h = sdev_to_hba(sdev);
474 spin_lock_irqsave(&h->lock, flags);
475 hdev = sdev->hostdata;
476 if (!hdev) {
477 spin_unlock_irqrestore(&h->lock, flags);
478 return -ENODEV;
479 }
480 memcpy(sn, hdev->device_id, sizeof(sn));
481 spin_unlock_irqrestore(&h->lock, flags);
482 return snprintf(buf, 16 * 2 + 2,
483 "%02X%02X%02X%02X%02X%02X%02X%02X"
484 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
485 sn[0], sn[1], sn[2], sn[3],
486 sn[4], sn[5], sn[6], sn[7],
487 sn[8], sn[9], sn[10], sn[11],
488 sn[12], sn[13], sn[14], sn[15]);
489}
490
491static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
492static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
493static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
494static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
495static DEVICE_ATTR(firmware_revision, S_IRUGO,
496 host_show_firmware_revision, NULL);
497static DEVICE_ATTR(commands_outstanding, S_IRUGO,
498 host_show_commands_outstanding, NULL);
499static DEVICE_ATTR(transport_mode, S_IRUGO,
500 host_show_transport_mode, NULL);
501static DEVICE_ATTR(resettable, S_IRUGO,
502 host_show_resettable, NULL);
503
504static struct device_attribute *hpsa_sdev_attrs[] = {
505 &dev_attr_raid_level,
506 &dev_attr_lunid,
507 &dev_attr_unique_id,
508 NULL,
509};
510
511static struct device_attribute *hpsa_shost_attrs[] = {
512 &dev_attr_rescan,
513 &dev_attr_firmware_revision,
514 &dev_attr_commands_outstanding,
515 &dev_attr_transport_mode,
516 &dev_attr_resettable,
517 NULL,
518};
519
520static struct scsi_host_template hpsa_driver_template = {
521 .module = THIS_MODULE,
522 .name = HPSA,
523 .proc_name = HPSA,
524 .queuecommand = hpsa_scsi_queue_command,
525 .scan_start = hpsa_scan_start,
526 .scan_finished = hpsa_scan_finished,
527 .change_queue_depth = hpsa_change_queue_depth,
528 .this_id = -1,
529 .use_clustering = ENABLE_CLUSTERING,
530 .eh_abort_handler = hpsa_eh_abort_handler,
531 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
532 .ioctl = hpsa_ioctl,
533 .slave_alloc = hpsa_slave_alloc,
534 .slave_destroy = hpsa_slave_destroy,
535#ifdef CONFIG_COMPAT
536 .compat_ioctl = hpsa_compat_ioctl,
537#endif
538 .sdev_attrs = hpsa_sdev_attrs,
539 .shost_attrs = hpsa_shost_attrs,
540 .max_sectors = 8192,
541};
542
543
544
545static inline void addQ(struct list_head *list, struct CommandList *c)
546{
547 list_add_tail(&c->list, list);
548}
549
550static inline u32 next_command(struct ctlr_info *h, u8 q)
551{
552 u32 a;
553 struct reply_pool *rq = &h->reply_queue[q];
554 unsigned long flags;
555
556 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
557 return h->access.command_completed(h, q);
558
559 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
560 a = rq->head[rq->current_entry];
561 rq->current_entry++;
562 spin_lock_irqsave(&h->lock, flags);
563 h->commands_outstanding--;
564 spin_unlock_irqrestore(&h->lock, flags);
565 } else {
566 a = FIFO_EMPTY;
567 }
568
569 if (rq->current_entry == h->max_commands) {
570 rq->current_entry = 0;
571 rq->wraparound ^= 1;
572 }
573 return a;
574}
575
576
577
578
579
580static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
581{
582 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
583 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
584 if (likely(h->msix_vector))
585 c->Header.ReplyQueue =
586 smp_processor_id() % h->nreply_queues;
587 }
588}
589
590static int is_firmware_flash_cmd(u8 *cdb)
591{
592 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
593}
594
595
596
597
598
599
600#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
601#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
602static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
603 struct CommandList *c)
604{
605 if (!is_firmware_flash_cmd(c->Request.CDB))
606 return;
607 atomic_inc(&h->firmware_flash_in_progress);
608 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
609}
610
611static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
612 struct CommandList *c)
613{
614 if (is_firmware_flash_cmd(c->Request.CDB) &&
615 atomic_dec_and_test(&h->firmware_flash_in_progress))
616 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
617}
618
619static void enqueue_cmd_and_start_io(struct ctlr_info *h,
620 struct CommandList *c)
621{
622 unsigned long flags;
623
624 set_performant_mode(h, c);
625 dial_down_lockup_detection_during_fw_flash(h, c);
626 spin_lock_irqsave(&h->lock, flags);
627 addQ(&h->reqQ, c);
628 h->Qdepth++;
629 spin_unlock_irqrestore(&h->lock, flags);
630 start_io(h);
631}
632
633static inline void removeQ(struct CommandList *c)
634{
635 if (WARN_ON(list_empty(&c->list)))
636 return;
637 list_del_init(&c->list);
638}
639
640static inline int is_hba_lunid(unsigned char scsi3addr[])
641{
642 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
643}
644
645static inline int is_scsi_rev_5(struct ctlr_info *h)
646{
647 if (!h->hba_inquiry_data)
648 return 0;
649 if ((h->hba_inquiry_data[2] & 0x07) == 5)
650 return 1;
651 return 0;
652}
653
654static int hpsa_find_target_lun(struct ctlr_info *h,
655 unsigned char scsi3addr[], int bus, int *target, int *lun)
656{
657
658
659
660 int i, found = 0;
661 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
662
663 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
664
665 for (i = 0; i < h->ndevices; i++) {
666 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
667 __set_bit(h->dev[i]->target, lun_taken);
668 }
669
670 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
671 if (i < HPSA_MAX_DEVICES) {
672
673 *target = i;
674 *lun = 0;
675 found = 1;
676 }
677 return !found;
678}
679
680
681static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
682 struct hpsa_scsi_dev_t *device,
683 struct hpsa_scsi_dev_t *added[], int *nadded)
684{
685
686 int n = h->ndevices;
687 int i;
688 unsigned char addr1[8], addr2[8];
689 struct hpsa_scsi_dev_t *sd;
690
691 if (n >= HPSA_MAX_DEVICES) {
692 dev_err(&h->pdev->dev, "too many devices, some will be "
693 "inaccessible.\n");
694 return -1;
695 }
696
697
698 if (device->lun != -1)
699
700 goto lun_assigned;
701
702
703
704
705
706 if (device->scsi3addr[4] == 0) {
707
708 if (hpsa_find_target_lun(h, device->scsi3addr,
709 device->bus, &device->target, &device->lun) != 0)
710 return -1;
711 goto lun_assigned;
712 }
713
714
715
716
717
718
719
720 memcpy(addr1, device->scsi3addr, 8);
721 addr1[4] = 0;
722 for (i = 0; i < n; i++) {
723 sd = h->dev[i];
724 memcpy(addr2, sd->scsi3addr, 8);
725 addr2[4] = 0;
726
727 if (memcmp(addr1, addr2, 8) == 0) {
728 device->bus = sd->bus;
729 device->target = sd->target;
730 device->lun = device->scsi3addr[4];
731 break;
732 }
733 }
734 if (device->lun == -1) {
735 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
736 " suspect firmware bug or unsupported hardware "
737 "configuration.\n");
738 return -1;
739 }
740
741lun_assigned:
742
743 h->dev[n] = device;
744 h->ndevices++;
745 added[*nadded] = device;
746 (*nadded)++;
747
748
749
750
751
752
753 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
754 scsi_device_type(device->devtype), hostno,
755 device->bus, device->target, device->lun);
756 return 0;
757}
758
759
760static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
761 int entry, struct hpsa_scsi_dev_t *new_entry)
762{
763
764 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
765
766
767 h->dev[entry]->raid_level = new_entry->raid_level;
768 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
769 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
770 new_entry->target, new_entry->lun);
771}
772
773
774static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
775 int entry, struct hpsa_scsi_dev_t *new_entry,
776 struct hpsa_scsi_dev_t *added[], int *nadded,
777 struct hpsa_scsi_dev_t *removed[], int *nremoved)
778{
779
780 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
781 removed[*nremoved] = h->dev[entry];
782 (*nremoved)++;
783
784
785
786
787
788 if (new_entry->target == -1) {
789 new_entry->target = h->dev[entry]->target;
790 new_entry->lun = h->dev[entry]->lun;
791 }
792
793 h->dev[entry] = new_entry;
794 added[*nadded] = new_entry;
795 (*nadded)++;
796 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
797 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
798 new_entry->target, new_entry->lun);
799}
800
801
802static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
803 struct hpsa_scsi_dev_t *removed[], int *nremoved)
804{
805
806 int i;
807 struct hpsa_scsi_dev_t *sd;
808
809 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
810
811 sd = h->dev[entry];
812 removed[*nremoved] = h->dev[entry];
813 (*nremoved)++;
814
815 for (i = entry; i < h->ndevices-1; i++)
816 h->dev[i] = h->dev[i+1];
817 h->ndevices--;
818 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
819 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
820 sd->lun);
821}
822
823#define SCSI3ADDR_EQ(a, b) ( \
824 (a)[7] == (b)[7] && \
825 (a)[6] == (b)[6] && \
826 (a)[5] == (b)[5] && \
827 (a)[4] == (b)[4] && \
828 (a)[3] == (b)[3] && \
829 (a)[2] == (b)[2] && \
830 (a)[1] == (b)[1] && \
831 (a)[0] == (b)[0])
832
833static void fixup_botched_add(struct ctlr_info *h,
834 struct hpsa_scsi_dev_t *added)
835{
836
837
838
839 unsigned long flags;
840 int i, j;
841
842 spin_lock_irqsave(&h->lock, flags);
843 for (i = 0; i < h->ndevices; i++) {
844 if (h->dev[i] == added) {
845 for (j = i; j < h->ndevices-1; j++)
846 h->dev[j] = h->dev[j+1];
847 h->ndevices--;
848 break;
849 }
850 }
851 spin_unlock_irqrestore(&h->lock, flags);
852 kfree(added);
853}
854
855static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
856 struct hpsa_scsi_dev_t *dev2)
857{
858
859
860
861
862 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
863 sizeof(dev1->scsi3addr)) != 0)
864 return 0;
865 if (memcmp(dev1->device_id, dev2->device_id,
866 sizeof(dev1->device_id)) != 0)
867 return 0;
868 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
869 return 0;
870 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
871 return 0;
872 if (dev1->devtype != dev2->devtype)
873 return 0;
874 if (dev1->bus != dev2->bus)
875 return 0;
876 return 1;
877}
878
879static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
880 struct hpsa_scsi_dev_t *dev2)
881{
882
883
884
885
886 if (dev1->raid_level != dev2->raid_level)
887 return 1;
888 return 0;
889}
890
891
892
893
894
895
896
897
898
899static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
900 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
901 int *index)
902{
903 int i;
904#define DEVICE_NOT_FOUND 0
905#define DEVICE_CHANGED 1
906#define DEVICE_SAME 2
907#define DEVICE_UPDATED 3
908 for (i = 0; i < haystack_size; i++) {
909 if (haystack[i] == NULL)
910 continue;
911 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
912 *index = i;
913 if (device_is_the_same(needle, haystack[i])) {
914 if (device_updated(needle, haystack[i]))
915 return DEVICE_UPDATED;
916 return DEVICE_SAME;
917 } else {
918 return DEVICE_CHANGED;
919 }
920 }
921 }
922 *index = -1;
923 return DEVICE_NOT_FOUND;
924}
925
926static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
927 struct hpsa_scsi_dev_t *sd[], int nsds)
928{
929
930
931
932
933 int i, entry, device_change, changes = 0;
934 struct hpsa_scsi_dev_t *csd;
935 unsigned long flags;
936 struct hpsa_scsi_dev_t **added, **removed;
937 int nadded, nremoved;
938 struct Scsi_Host *sh = NULL;
939
940 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
941 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
942
943 if (!added || !removed) {
944 dev_warn(&h->pdev->dev, "out of memory in "
945 "adjust_hpsa_scsi_table\n");
946 goto free_and_out;
947 }
948
949 spin_lock_irqsave(&h->devlock, flags);
950
951
952
953
954
955
956
957
958 i = 0;
959 nremoved = 0;
960 nadded = 0;
961 while (i < h->ndevices) {
962 csd = h->dev[i];
963 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
964 if (device_change == DEVICE_NOT_FOUND) {
965 changes++;
966 hpsa_scsi_remove_entry(h, hostno, i,
967 removed, &nremoved);
968 continue;
969 } else if (device_change == DEVICE_CHANGED) {
970 changes++;
971 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
972 added, &nadded, removed, &nremoved);
973
974
975
976 sd[entry] = NULL;
977 } else if (device_change == DEVICE_UPDATED) {
978 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
979 }
980 i++;
981 }
982
983
984
985
986
987 for (i = 0; i < nsds; i++) {
988 if (!sd[i])
989 continue;
990 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
991 h->ndevices, &entry);
992 if (device_change == DEVICE_NOT_FOUND) {
993 changes++;
994 if (hpsa_scsi_add_entry(h, hostno, sd[i],
995 added, &nadded) != 0)
996 break;
997 sd[i] = NULL;
998 } else if (device_change == DEVICE_CHANGED) {
999
1000 changes++;
1001 dev_warn(&h->pdev->dev,
1002 "device unexpectedly changed.\n");
1003
1004 }
1005 }
1006 spin_unlock_irqrestore(&h->devlock, flags);
1007
1008
1009
1010
1011
1012 if (hostno == -1 || !changes)
1013 goto free_and_out;
1014
1015 sh = h->scsi_host;
1016
1017 for (i = 0; i < nremoved; i++) {
1018 struct scsi_device *sdev =
1019 scsi_device_lookup(sh, removed[i]->bus,
1020 removed[i]->target, removed[i]->lun);
1021 if (sdev != NULL) {
1022 scsi_remove_device(sdev);
1023 scsi_device_put(sdev);
1024 } else {
1025
1026
1027
1028
1029 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
1030 " for removal.", hostno, removed[i]->bus,
1031 removed[i]->target, removed[i]->lun);
1032 }
1033 kfree(removed[i]);
1034 removed[i] = NULL;
1035 }
1036
1037
1038 for (i = 0; i < nadded; i++) {
1039 if (scsi_add_device(sh, added[i]->bus,
1040 added[i]->target, added[i]->lun) == 0)
1041 continue;
1042 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1043 "device not added.\n", hostno, added[i]->bus,
1044 added[i]->target, added[i]->lun);
1045
1046
1047
1048 fixup_botched_add(h, added[i]);
1049 }
1050
1051free_and_out:
1052 kfree(added);
1053 kfree(removed);
1054}
1055
1056
1057
1058
1059
1060static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1061 int bus, int target, int lun)
1062{
1063 int i;
1064 struct hpsa_scsi_dev_t *sd;
1065
1066 for (i = 0; i < h->ndevices; i++) {
1067 sd = h->dev[i];
1068 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1069 return sd;
1070 }
1071 return NULL;
1072}
1073
1074
1075static int hpsa_slave_alloc(struct scsi_device *sdev)
1076{
1077 struct hpsa_scsi_dev_t *sd;
1078 unsigned long flags;
1079 struct ctlr_info *h;
1080
1081 h = sdev_to_hba(sdev);
1082 spin_lock_irqsave(&h->devlock, flags);
1083 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1084 sdev_id(sdev), sdev->lun);
1085 if (sd != NULL)
1086 sdev->hostdata = sd;
1087 spin_unlock_irqrestore(&h->devlock, flags);
1088 return 0;
1089}
1090
1091static void hpsa_slave_destroy(struct scsi_device *sdev)
1092{
1093
1094}
1095
1096static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1097{
1098 int i;
1099
1100 if (!h->cmd_sg_list)
1101 return;
1102 for (i = 0; i < h->nr_cmds; i++) {
1103 kfree(h->cmd_sg_list[i]);
1104 h->cmd_sg_list[i] = NULL;
1105 }
1106 kfree(h->cmd_sg_list);
1107 h->cmd_sg_list = NULL;
1108}
1109
1110static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1111{
1112 int i;
1113
1114 if (h->chainsize <= 0)
1115 return 0;
1116
1117 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1118 GFP_KERNEL);
1119 if (!h->cmd_sg_list)
1120 return -ENOMEM;
1121 for (i = 0; i < h->nr_cmds; i++) {
1122 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1123 h->chainsize, GFP_KERNEL);
1124 if (!h->cmd_sg_list[i])
1125 goto clean;
1126 }
1127 return 0;
1128
1129clean:
1130 hpsa_free_sg_chain_blocks(h);
1131 return -ENOMEM;
1132}
1133
1134static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1135 struct CommandList *c)
1136{
1137 struct SGDescriptor *chain_sg, *chain_block;
1138 u64 temp64;
1139
1140 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1141 chain_block = h->cmd_sg_list[c->cmdindex];
1142 chain_sg->Ext = HPSA_SG_CHAIN;
1143 chain_sg->Len = sizeof(*chain_sg) *
1144 (c->Header.SGTotal - h->max_cmd_sg_entries);
1145 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
1146 PCI_DMA_TODEVICE);
1147 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1148
1149 chain_sg->Addr.lower = 0;
1150 chain_sg->Addr.upper = 0;
1151 return -1;
1152 }
1153 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
1154 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1155 return 0;
1156}
1157
1158static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1159 struct CommandList *c)
1160{
1161 struct SGDescriptor *chain_sg;
1162 union u64bit temp64;
1163
1164 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
1165 return;
1166
1167 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1168 temp64.val32.lower = chain_sg->Addr.lower;
1169 temp64.val32.upper = chain_sg->Addr.upper;
1170 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1171}
1172
1173static void complete_scsi_command(struct CommandList *cp)
1174{
1175 struct scsi_cmnd *cmd;
1176 struct ctlr_info *h;
1177 struct ErrorInfo *ei;
1178
1179 unsigned char sense_key;
1180 unsigned char asc;
1181 unsigned char ascq;
1182 unsigned long sense_data_size;
1183
1184 ei = cp->err_info;
1185 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1186 h = cp->h;
1187
1188 scsi_dma_unmap(cmd);
1189 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
1190 hpsa_unmap_sg_chain_block(h, cp);
1191
1192 cmd->result = (DID_OK << 16);
1193 cmd->result |= (COMMAND_COMPLETE << 8);
1194 cmd->result |= ei->ScsiStatus;
1195
1196
1197 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1198 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1199 else
1200 sense_data_size = sizeof(ei->SenseInfo);
1201 if (ei->SenseLen < sense_data_size)
1202 sense_data_size = ei->SenseLen;
1203
1204 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1205 scsi_set_resid(cmd, ei->ResidualCnt);
1206
1207 if (ei->CommandStatus == 0) {
1208 cmd->scsi_done(cmd);
1209 cmd_free(h, cp);
1210 return;
1211 }
1212
1213
1214 switch (ei->CommandStatus) {
1215
1216 case CMD_TARGET_STATUS:
1217 if (ei->ScsiStatus) {
1218
1219 sense_key = 0xf & ei->SenseInfo[2];
1220
1221 asc = ei->SenseInfo[12];
1222
1223 ascq = ei->SenseInfo[13];
1224 }
1225
1226 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1227 if (check_for_unit_attention(h, cp)) {
1228 cmd->result = DID_SOFT_ERROR << 16;
1229 break;
1230 }
1231 if (sense_key == ILLEGAL_REQUEST) {
1232
1233
1234
1235
1236 if (cp->Request.CDB[0] == REPORT_LUNS)
1237 break;
1238
1239
1240
1241
1242 if ((asc == 0x25) && (ascq == 0x0)) {
1243 dev_warn(&h->pdev->dev, "cp %p "
1244 "has check condition\n", cp);
1245 break;
1246 }
1247 }
1248
1249 if (sense_key == NOT_READY) {
1250
1251
1252
1253
1254 if ((asc == 0x04) && (ascq == 0x03)) {
1255 dev_warn(&h->pdev->dev, "cp %p "
1256 "has check condition: unit "
1257 "not ready, manual "
1258 "intervention required\n", cp);
1259 break;
1260 }
1261 }
1262 if (sense_key == ABORTED_COMMAND) {
1263
1264 dev_warn(&h->pdev->dev, "cp %p "
1265 "has check condition: aborted command: "
1266 "ASC: 0x%x, ASCQ: 0x%x\n",
1267 cp, asc, ascq);
1268 cmd->result = DID_SOFT_ERROR << 16;
1269 break;
1270 }
1271
1272 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
1273 "unknown type: "
1274 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1275 "Returning result: 0x%x, "
1276 "cmd=[%02x %02x %02x %02x %02x "
1277 "%02x %02x %02x %02x %02x %02x "
1278 "%02x %02x %02x %02x %02x]\n",
1279 cp, sense_key, asc, ascq,
1280 cmd->result,
1281 cmd->cmnd[0], cmd->cmnd[1],
1282 cmd->cmnd[2], cmd->cmnd[3],
1283 cmd->cmnd[4], cmd->cmnd[5],
1284 cmd->cmnd[6], cmd->cmnd[7],
1285 cmd->cmnd[8], cmd->cmnd[9],
1286 cmd->cmnd[10], cmd->cmnd[11],
1287 cmd->cmnd[12], cmd->cmnd[13],
1288 cmd->cmnd[14], cmd->cmnd[15]);
1289 break;
1290 }
1291
1292
1293
1294
1295
1296 if (ei->ScsiStatus) {
1297 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1298 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1299 "Returning result: 0x%x\n",
1300 cp, ei->ScsiStatus,
1301 sense_key, asc, ascq,
1302 cmd->result);
1303 } else {
1304 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1305 "Returning no connection.\n", cp),
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319 cmd->result = DID_NO_CONNECT << 16;
1320 }
1321 break;
1322
1323 case CMD_DATA_UNDERRUN:
1324 break;
1325 case CMD_DATA_OVERRUN:
1326 dev_warn(&h->pdev->dev, "cp %p has"
1327 " completed with data overrun "
1328 "reported\n", cp);
1329 break;
1330 case CMD_INVALID: {
1331
1332
1333
1334
1335
1336
1337
1338
1339 cmd->result = DID_NO_CONNECT << 16;
1340 }
1341 break;
1342 case CMD_PROTOCOL_ERR:
1343 cmd->result = DID_ERROR << 16;
1344 dev_warn(&h->pdev->dev, "cp %p has "
1345 "protocol error\n", cp);
1346 break;
1347 case CMD_HARDWARE_ERR:
1348 cmd->result = DID_ERROR << 16;
1349 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1350 break;
1351 case CMD_CONNECTION_LOST:
1352 cmd->result = DID_ERROR << 16;
1353 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1354 break;
1355 case CMD_ABORTED:
1356 cmd->result = DID_ABORT << 16;
1357 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1358 cp, ei->ScsiStatus);
1359 break;
1360 case CMD_ABORT_FAILED:
1361 cmd->result = DID_ERROR << 16;
1362 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1363 break;
1364 case CMD_UNSOLICITED_ABORT:
1365 cmd->result = DID_SOFT_ERROR << 16;
1366 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1367 "abort\n", cp);
1368 break;
1369 case CMD_TIMEOUT:
1370 cmd->result = DID_TIME_OUT << 16;
1371 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
1372 break;
1373 case CMD_UNABORTABLE:
1374 cmd->result = DID_ERROR << 16;
1375 dev_warn(&h->pdev->dev, "Command unabortable\n");
1376 break;
1377 default:
1378 cmd->result = DID_ERROR << 16;
1379 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1380 cp, ei->CommandStatus);
1381 }
1382 cmd->scsi_done(cmd);
1383 cmd_free(h, cp);
1384}
1385
1386static void hpsa_pci_unmap(struct pci_dev *pdev,
1387 struct CommandList *c, int sg_used, int data_direction)
1388{
1389 int i;
1390 union u64bit addr64;
1391
1392 for (i = 0; i < sg_used; i++) {
1393 addr64.val32.lower = c->SG[i].Addr.lower;
1394 addr64.val32.upper = c->SG[i].Addr.upper;
1395 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1396 data_direction);
1397 }
1398}
1399
1400static int hpsa_map_one(struct pci_dev *pdev,
1401 struct CommandList *cp,
1402 unsigned char *buf,
1403 size_t buflen,
1404 int data_direction)
1405{
1406 u64 addr64;
1407
1408 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1409 cp->Header.SGList = 0;
1410 cp->Header.SGTotal = 0;
1411 return 0;
1412 }
1413
1414 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1415 if (dma_mapping_error(&pdev->dev, addr64)) {
1416
1417 cp->Header.SGList = 0;
1418 cp->Header.SGTotal = 0;
1419 return -1;
1420 }
1421 cp->SG[0].Addr.lower =
1422 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1423 cp->SG[0].Addr.upper =
1424 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1425 cp->SG[0].Len = buflen;
1426 cp->Header.SGList = (u8) 1;
1427 cp->Header.SGTotal = (u16) 1;
1428 return 0;
1429}
1430
1431static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1432 struct CommandList *c)
1433{
1434 DECLARE_COMPLETION_ONSTACK(wait);
1435
1436 c->waiting = &wait;
1437 enqueue_cmd_and_start_io(h, c);
1438 wait_for_completion(&wait);
1439}
1440
1441static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1442 struct CommandList *c)
1443{
1444 unsigned long flags;
1445
1446
1447 spin_lock_irqsave(&h->lock, flags);
1448 if (unlikely(h->lockup_detected)) {
1449 spin_unlock_irqrestore(&h->lock, flags);
1450 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
1451 } else {
1452 spin_unlock_irqrestore(&h->lock, flags);
1453 hpsa_scsi_do_simple_cmd_core(h, c);
1454 }
1455}
1456
1457#define MAX_DRIVER_CMD_RETRIES 25
1458static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1459 struct CommandList *c, int data_direction)
1460{
1461 int backoff_time = 10, retry_count = 0;
1462
1463 do {
1464 memset(c->err_info, 0, sizeof(*c->err_info));
1465 hpsa_scsi_do_simple_cmd_core(h, c);
1466 retry_count++;
1467 if (retry_count > 3) {
1468 msleep(backoff_time);
1469 if (backoff_time < 1000)
1470 backoff_time *= 2;
1471 }
1472 } while ((check_for_unit_attention(h, c) ||
1473 check_for_busy(h, c)) &&
1474 retry_count <= MAX_DRIVER_CMD_RETRIES);
1475 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1476}
1477
1478static void hpsa_scsi_interpret_error(struct CommandList *cp)
1479{
1480 struct ErrorInfo *ei;
1481 struct device *d = &cp->h->pdev->dev;
1482
1483 ei = cp->err_info;
1484 switch (ei->CommandStatus) {
1485 case CMD_TARGET_STATUS:
1486 dev_warn(d, "cmd %p has completed with errors\n", cp);
1487 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1488 ei->ScsiStatus);
1489 if (ei->ScsiStatus == 0)
1490 dev_warn(d, "SCSI status is abnormally zero. "
1491 "(probably indicates selection timeout "
1492 "reported incorrectly due to a known "
1493 "firmware bug, circa July, 2001.)\n");
1494 break;
1495 case CMD_DATA_UNDERRUN:
1496 dev_info(d, "UNDERRUN\n");
1497 break;
1498 case CMD_DATA_OVERRUN:
1499 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1500 break;
1501 case CMD_INVALID: {
1502
1503
1504
1505 dev_warn(d, "cp %p is reported invalid (probably means "
1506 "target device no longer present)\n", cp);
1507
1508
1509 }
1510 break;
1511 case CMD_PROTOCOL_ERR:
1512 dev_warn(d, "cp %p has protocol error \n", cp);
1513 break;
1514 case CMD_HARDWARE_ERR:
1515
1516 dev_warn(d, "cp %p had hardware error\n", cp);
1517 break;
1518 case CMD_CONNECTION_LOST:
1519 dev_warn(d, "cp %p had connection lost\n", cp);
1520 break;
1521 case CMD_ABORTED:
1522 dev_warn(d, "cp %p was aborted\n", cp);
1523 break;
1524 case CMD_ABORT_FAILED:
1525 dev_warn(d, "cp %p reports abort failed\n", cp);
1526 break;
1527 case CMD_UNSOLICITED_ABORT:
1528 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1529 break;
1530 case CMD_TIMEOUT:
1531 dev_warn(d, "cp %p timed out\n", cp);
1532 break;
1533 case CMD_UNABORTABLE:
1534 dev_warn(d, "Command unabortable\n");
1535 break;
1536 default:
1537 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1538 ei->CommandStatus);
1539 }
1540}
1541
1542static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1543 unsigned char page, unsigned char *buf,
1544 unsigned char bufsize)
1545{
1546 int rc = IO_OK;
1547 struct CommandList *c;
1548 struct ErrorInfo *ei;
1549
1550 c = cmd_special_alloc(h);
1551
1552 if (c == NULL) {
1553 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1554 return -ENOMEM;
1555 }
1556
1557 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
1558 page, scsi3addr, TYPE_CMD)) {
1559 rc = -1;
1560 goto out;
1561 }
1562 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1563 ei = c->err_info;
1564 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1565 hpsa_scsi_interpret_error(c);
1566 rc = -1;
1567 }
1568out:
1569 cmd_special_free(h, c);
1570 return rc;
1571}
1572
1573static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1574{
1575 int rc = IO_OK;
1576 struct CommandList *c;
1577 struct ErrorInfo *ei;
1578
1579 c = cmd_special_alloc(h);
1580
1581 if (c == NULL) {
1582 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1583 return -ENOMEM;
1584 }
1585
1586
1587 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
1588 NULL, 0, 0, scsi3addr, TYPE_MSG);
1589 hpsa_scsi_do_simple_cmd_core(h, c);
1590
1591
1592 ei = c->err_info;
1593 if (ei->CommandStatus != 0) {
1594 hpsa_scsi_interpret_error(c);
1595 rc = -1;
1596 }
1597 cmd_special_free(h, c);
1598 return rc;
1599}
1600
1601static void hpsa_get_raid_level(struct ctlr_info *h,
1602 unsigned char *scsi3addr, unsigned char *raid_level)
1603{
1604 int rc;
1605 unsigned char *buf;
1606
1607 *raid_level = RAID_UNKNOWN;
1608 buf = kzalloc(64, GFP_KERNEL);
1609 if (!buf)
1610 return;
1611 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1612 if (rc == 0)
1613 *raid_level = buf[8];
1614 if (*raid_level > RAID_UNKNOWN)
1615 *raid_level = RAID_UNKNOWN;
1616 kfree(buf);
1617 return;
1618}
1619
1620
1621static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1622 unsigned char *device_id, int buflen)
1623{
1624 int rc;
1625 unsigned char *buf;
1626
1627 if (buflen > 16)
1628 buflen = 16;
1629 buf = kzalloc(64, GFP_KERNEL);
1630 if (!buf)
1631 return -1;
1632 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1633 if (rc == 0)
1634 memcpy(device_id, &buf[8], buflen);
1635 kfree(buf);
1636 return rc != 0;
1637}
1638
1639static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1640 struct ReportLUNdata *buf, int bufsize,
1641 int extended_response)
1642{
1643 int rc = IO_OK;
1644 struct CommandList *c;
1645 unsigned char scsi3addr[8];
1646 struct ErrorInfo *ei;
1647
1648 c = cmd_special_alloc(h);
1649 if (c == NULL) {
1650 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1651 return -1;
1652 }
1653
1654 memset(scsi3addr, 0, sizeof(scsi3addr));
1655 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1656 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
1657 rc = -1;
1658 goto out;
1659 }
1660 if (extended_response)
1661 c->Request.CDB[1] = extended_response;
1662 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1663 ei = c->err_info;
1664 if (ei->CommandStatus != 0 &&
1665 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1666 hpsa_scsi_interpret_error(c);
1667 rc = -1;
1668 }
1669out:
1670 cmd_special_free(h, c);
1671 return rc;
1672}
1673
1674static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1675 struct ReportLUNdata *buf,
1676 int bufsize, int extended_response)
1677{
1678 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1679}
1680
1681static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1682 struct ReportLUNdata *buf, int bufsize)
1683{
1684 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1685}
1686
1687static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1688 int bus, int target, int lun)
1689{
1690 device->bus = bus;
1691 device->target = target;
1692 device->lun = lun;
1693}
1694
1695static int hpsa_update_device_info(struct ctlr_info *h,
1696 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
1697 unsigned char *is_OBDR_device)
1698{
1699
1700#define OBDR_SIG_OFFSET 43
1701#define OBDR_TAPE_SIG "$DR-10"
1702#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
1703#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
1704
1705 unsigned char *inq_buff;
1706 unsigned char *obdr_sig;
1707
1708 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1709 if (!inq_buff)
1710 goto bail_out;
1711
1712
1713 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1714 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1715
1716 dev_err(&h->pdev->dev,
1717 "hpsa_update_device_info: inquiry failed\n");
1718 goto bail_out;
1719 }
1720
1721 this_device->devtype = (inq_buff[0] & 0x1f);
1722 memcpy(this_device->scsi3addr, scsi3addr, 8);
1723 memcpy(this_device->vendor, &inq_buff[8],
1724 sizeof(this_device->vendor));
1725 memcpy(this_device->model, &inq_buff[16],
1726 sizeof(this_device->model));
1727 memset(this_device->device_id, 0,
1728 sizeof(this_device->device_id));
1729 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1730 sizeof(this_device->device_id));
1731
1732 if (this_device->devtype == TYPE_DISK &&
1733 is_logical_dev_addr_mode(scsi3addr))
1734 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1735 else
1736 this_device->raid_level = RAID_UNKNOWN;
1737
1738 if (is_OBDR_device) {
1739
1740
1741
1742 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
1743 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
1744 strncmp(obdr_sig, OBDR_TAPE_SIG,
1745 OBDR_SIG_LEN) == 0);
1746 }
1747
1748 kfree(inq_buff);
1749 return 0;
1750
1751bail_out:
1752 kfree(inq_buff);
1753 return 1;
1754}
1755
1756static unsigned char *ext_target_model[] = {
1757 "MSA2012",
1758 "MSA2024",
1759 "MSA2312",
1760 "MSA2324",
1761 "P2000 G3 SAS",
1762 NULL,
1763};
1764
1765static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1766{
1767 int i;
1768
1769 for (i = 0; ext_target_model[i]; i++)
1770 if (strncmp(device->model, ext_target_model[i],
1771 strlen(ext_target_model[i])) == 0)
1772 return 1;
1773 return 0;
1774}
1775
1776
1777
1778
1779
1780
1781
1782
1783static void figure_bus_target_lun(struct ctlr_info *h,
1784 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
1785{
1786 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1787
1788 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
1789
1790 if (is_hba_lunid(lunaddrbytes))
1791 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
1792 else
1793
1794 hpsa_set_bus_target_lun(device, 2, -1, -1);
1795 return;
1796 }
1797
1798 if (is_ext_target(h, device)) {
1799
1800
1801
1802
1803 hpsa_set_bus_target_lun(device,
1804 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
1805 return;
1806 }
1807 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
1808}
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821static int add_ext_target_dev(struct ctlr_info *h,
1822 struct hpsa_scsi_dev_t *tmpdevice,
1823 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1824 unsigned long lunzerobits[], int *n_ext_target_devs)
1825{
1826 unsigned char scsi3addr[8];
1827
1828 if (test_bit(tmpdevice->target, lunzerobits))
1829 return 0;
1830
1831 if (!is_logical_dev_addr_mode(lunaddrbytes))
1832 return 0;
1833
1834 if (!is_ext_target(h, tmpdevice))
1835 return 0;
1836
1837 if (tmpdevice->lun == 0)
1838 return 0;
1839
1840 memset(scsi3addr, 0, 8);
1841 scsi3addr[3] = tmpdevice->target;
1842 if (is_hba_lunid(scsi3addr))
1843 return 0;
1844
1845 if (is_scsi_rev_5(h))
1846 return 0;
1847
1848 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
1849 dev_warn(&h->pdev->dev, "Maximum number of external "
1850 "target devices exceeded. Check your hardware "
1851 "configuration.");
1852 return 0;
1853 }
1854
1855 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1856 return 0;
1857 (*n_ext_target_devs)++;
1858 hpsa_set_bus_target_lun(this_device,
1859 tmpdevice->bus, tmpdevice->target, 0);
1860 set_bit(tmpdevice->target, lunzerobits);
1861 return 1;
1862}
1863
1864
1865
1866
1867
1868
1869
1870static int hpsa_gather_lun_info(struct ctlr_info *h,
1871 int reportlunsize,
1872 struct ReportLUNdata *physdev, u32 *nphysicals,
1873 struct ReportLUNdata *logdev, u32 *nlogicals)
1874{
1875 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1876 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1877 return -1;
1878 }
1879 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1880 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1881 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1882 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1883 *nphysicals - HPSA_MAX_PHYS_LUN);
1884 *nphysicals = HPSA_MAX_PHYS_LUN;
1885 }
1886 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1887 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1888 return -1;
1889 }
1890 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1891
1892 if (*nlogicals > HPSA_MAX_LUN) {
1893 dev_warn(&h->pdev->dev,
1894 "maximum logical LUNs (%d) exceeded. "
1895 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1896 *nlogicals - HPSA_MAX_LUN);
1897 *nlogicals = HPSA_MAX_LUN;
1898 }
1899 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1900 dev_warn(&h->pdev->dev,
1901 "maximum logical + physical LUNs (%d) exceeded. "
1902 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1903 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1904 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1905 }
1906 return 0;
1907}
1908
1909u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1910 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1911 struct ReportLUNdata *logdev_list)
1912{
1913
1914
1915
1916
1917
1918 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1919 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1920
1921 if (i == raid_ctlr_position)
1922 return RAID_CTLR_LUNID;
1923
1924 if (i < logicals_start)
1925 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1926
1927 if (i < last_device)
1928 return &logdev_list->LUN[i - nphysicals -
1929 (raid_ctlr_position == 0)][0];
1930 BUG();
1931 return NULL;
1932}
1933
1934static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1935{
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946 struct ReportLUNdata *physdev_list = NULL;
1947 struct ReportLUNdata *logdev_list = NULL;
1948 u32 nphysicals = 0;
1949 u32 nlogicals = 0;
1950 u32 ndev_allocated = 0;
1951 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1952 int ncurrent = 0;
1953 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1954 int i, n_ext_target_devs, ndevs_to_allocate;
1955 int raid_ctlr_position;
1956 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
1957
1958 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
1959 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1960 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1961 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1962
1963 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1964 dev_err(&h->pdev->dev, "out of memory\n");
1965 goto out;
1966 }
1967 memset(lunzerobits, 0, sizeof(lunzerobits));
1968
1969 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1970 logdev_list, &nlogicals))
1971 goto out;
1972
1973
1974
1975
1976
1977 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
1978
1979
1980 for (i = 0; i < ndevs_to_allocate; i++) {
1981 if (i >= HPSA_MAX_DEVICES) {
1982 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
1983 " %d devices ignored.\n", HPSA_MAX_DEVICES,
1984 ndevs_to_allocate - HPSA_MAX_DEVICES);
1985 break;
1986 }
1987
1988 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1989 if (!currentsd[i]) {
1990 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1991 __FILE__, __LINE__);
1992 goto out;
1993 }
1994 ndev_allocated++;
1995 }
1996
1997 if (unlikely(is_scsi_rev_5(h)))
1998 raid_ctlr_position = 0;
1999 else
2000 raid_ctlr_position = nphysicals + nlogicals;
2001
2002
2003 n_ext_target_devs = 0;
2004 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
2005 u8 *lunaddrbytes, is_OBDR = 0;
2006
2007
2008 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
2009 i, nphysicals, nlogicals, physdev_list, logdev_list);
2010
2011 if (lunaddrbytes[3] & 0xC0 &&
2012 i < nphysicals + (raid_ctlr_position == 0))
2013 continue;
2014
2015
2016 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
2017 &is_OBDR))
2018 continue;
2019 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
2020 this_device = currentsd[ncurrent];
2021
2022
2023
2024
2025
2026
2027
2028
2029 if (add_ext_target_dev(h, tmpdevice, this_device,
2030 lunaddrbytes, lunzerobits,
2031 &n_ext_target_devs)) {
2032 ncurrent++;
2033 this_device = currentsd[ncurrent];
2034 }
2035
2036 *this_device = *tmpdevice;
2037
2038 switch (this_device->devtype) {
2039 case TYPE_ROM:
2040
2041
2042
2043
2044
2045
2046
2047 if (is_OBDR)
2048 ncurrent++;
2049 break;
2050 case TYPE_DISK:
2051 if (i < nphysicals)
2052 break;
2053 ncurrent++;
2054 break;
2055 case TYPE_TAPE:
2056 case TYPE_MEDIUM_CHANGER:
2057 ncurrent++;
2058 break;
2059 case TYPE_RAID:
2060
2061
2062
2063
2064
2065 if (!is_hba_lunid(lunaddrbytes))
2066 break;
2067 ncurrent++;
2068 break;
2069 default:
2070 break;
2071 }
2072 if (ncurrent >= HPSA_MAX_DEVICES)
2073 break;
2074 }
2075 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
2076out:
2077 kfree(tmpdevice);
2078 for (i = 0; i < ndev_allocated; i++)
2079 kfree(currentsd[i]);
2080 kfree(currentsd);
2081 kfree(physdev_list);
2082 kfree(logdev_list);
2083}
2084
2085
2086
2087
2088
2089static int hpsa_scatter_gather(struct ctlr_info *h,
2090 struct CommandList *cp,
2091 struct scsi_cmnd *cmd)
2092{
2093 unsigned int len;
2094 struct scatterlist *sg;
2095 u64 addr64;
2096 int use_sg, i, sg_index, chained;
2097 struct SGDescriptor *curr_sg;
2098
2099 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
2100
2101 use_sg = scsi_dma_map(cmd);
2102 if (use_sg < 0)
2103 return use_sg;
2104
2105 if (!use_sg)
2106 goto sglist_finished;
2107
2108 curr_sg = cp->SG;
2109 chained = 0;
2110 sg_index = 0;
2111 scsi_for_each_sg(cmd, sg, use_sg, i) {
2112 if (i == h->max_cmd_sg_entries - 1 &&
2113 use_sg > h->max_cmd_sg_entries) {
2114 chained = 1;
2115 curr_sg = h->cmd_sg_list[cp->cmdindex];
2116 sg_index = 0;
2117 }
2118 addr64 = (u64) sg_dma_address(sg);
2119 len = sg_dma_len(sg);
2120 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2121 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2122 curr_sg->Len = len;
2123 curr_sg->Ext = 0;
2124 curr_sg++;
2125 }
2126
2127 if (use_sg + chained > h->maxSG)
2128 h->maxSG = use_sg + chained;
2129
2130 if (chained) {
2131 cp->Header.SGList = h->max_cmd_sg_entries;
2132 cp->Header.SGTotal = (u16) (use_sg + 1);
2133 if (hpsa_map_sg_chain_block(h, cp)) {
2134 scsi_dma_unmap(cmd);
2135 return -1;
2136 }
2137 return 0;
2138 }
2139
2140sglist_finished:
2141
2142 cp->Header.SGList = (u8) use_sg;
2143 cp->Header.SGTotal = (u16) use_sg;
2144 return 0;
2145}
2146
2147
2148static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2149 void (*done)(struct scsi_cmnd *))
2150{
2151 struct ctlr_info *h;
2152 struct hpsa_scsi_dev_t *dev;
2153 unsigned char scsi3addr[8];
2154 struct CommandList *c;
2155 unsigned long flags;
2156
2157
2158 h = sdev_to_hba(cmd->device);
2159 dev = cmd->device->hostdata;
2160 if (!dev) {
2161 cmd->result = DID_NO_CONNECT << 16;
2162 done(cmd);
2163 return 0;
2164 }
2165 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2166
2167 spin_lock_irqsave(&h->lock, flags);
2168 if (unlikely(h->lockup_detected)) {
2169 spin_unlock_irqrestore(&h->lock, flags);
2170 cmd->result = DID_ERROR << 16;
2171 done(cmd);
2172 return 0;
2173 }
2174 spin_unlock_irqrestore(&h->lock, flags);
2175 c = cmd_alloc(h);
2176 if (c == NULL) {
2177 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2178 return SCSI_MLQUEUE_HOST_BUSY;
2179 }
2180
2181
2182
2183 cmd->scsi_done = done;
2184
2185
2186 cmd->host_scribble = (unsigned char *) c;
2187
2188 c->cmd_type = CMD_SCSI;
2189 c->scsi_cmd = cmd;
2190 c->Header.ReplyQueue = 0;
2191 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2192 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2193 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
2194
2195
2196
2197 c->Request.Timeout = 0;
2198 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2199 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2200 c->Request.CDBLen = cmd->cmd_len;
2201 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2202 c->Request.Type.Type = TYPE_CMD;
2203 c->Request.Type.Attribute = ATTR_SIMPLE;
2204 switch (cmd->sc_data_direction) {
2205 case DMA_TO_DEVICE:
2206 c->Request.Type.Direction = XFER_WRITE;
2207 break;
2208 case DMA_FROM_DEVICE:
2209 c->Request.Type.Direction = XFER_READ;
2210 break;
2211 case DMA_NONE:
2212 c->Request.Type.Direction = XFER_NONE;
2213 break;
2214 case DMA_BIDIRECTIONAL:
2215
2216
2217
2218
2219
2220 c->Request.Type.Direction = XFER_RSVD;
2221
2222
2223
2224
2225
2226
2227
2228
2229 break;
2230
2231 default:
2232 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2233 cmd->sc_data_direction);
2234 BUG();
2235 break;
2236 }
2237
2238 if (hpsa_scatter_gather(h, c, cmd) < 0) {
2239 cmd_free(h, c);
2240 return SCSI_MLQUEUE_HOST_BUSY;
2241 }
2242 enqueue_cmd_and_start_io(h, c);
2243
2244 return 0;
2245}
2246
2247static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2248
2249static void hpsa_scan_start(struct Scsi_Host *sh)
2250{
2251 struct ctlr_info *h = shost_to_hba(sh);
2252 unsigned long flags;
2253
2254
2255 while (1) {
2256 spin_lock_irqsave(&h->scan_lock, flags);
2257 if (h->scan_finished)
2258 break;
2259 spin_unlock_irqrestore(&h->scan_lock, flags);
2260 wait_event(h->scan_wait_queue, h->scan_finished);
2261
2262
2263
2264
2265
2266 }
2267 h->scan_finished = 0;
2268 spin_unlock_irqrestore(&h->scan_lock, flags);
2269
2270 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2271
2272 spin_lock_irqsave(&h->scan_lock, flags);
2273 h->scan_finished = 1;
2274 wake_up_all(&h->scan_wait_queue);
2275 spin_unlock_irqrestore(&h->scan_lock, flags);
2276}
2277
2278static int hpsa_scan_finished(struct Scsi_Host *sh,
2279 unsigned long elapsed_time)
2280{
2281 struct ctlr_info *h = shost_to_hba(sh);
2282 unsigned long flags;
2283 int finished;
2284
2285 spin_lock_irqsave(&h->scan_lock, flags);
2286 finished = h->scan_finished;
2287 spin_unlock_irqrestore(&h->scan_lock, flags);
2288 return finished;
2289}
2290
2291static int hpsa_change_queue_depth(struct scsi_device *sdev,
2292 int qdepth, int reason)
2293{
2294 struct ctlr_info *h = sdev_to_hba(sdev);
2295
2296 if (reason != SCSI_QDEPTH_DEFAULT)
2297 return -ENOTSUPP;
2298
2299 if (qdepth < 1)
2300 qdepth = 1;
2301 else
2302 if (qdepth > h->nr_cmds)
2303 qdepth = h->nr_cmds;
2304 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2305 return sdev->queue_depth;
2306}
2307
2308static void hpsa_unregister_scsi(struct ctlr_info *h)
2309{
2310
2311 scsi_remove_host(h->scsi_host);
2312 scsi_host_put(h->scsi_host);
2313 h->scsi_host = NULL;
2314}
2315
2316static int hpsa_register_scsi(struct ctlr_info *h)
2317{
2318 struct Scsi_Host *sh;
2319 int error;
2320
2321 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2322 if (sh == NULL)
2323 goto fail;
2324
2325 sh->io_port = 0;
2326 sh->n_io_port = 0;
2327 sh->this_id = -1;
2328 sh->max_channel = 3;
2329 sh->max_cmd_len = MAX_COMMAND_SIZE;
2330 sh->max_lun = HPSA_MAX_LUN;
2331 sh->max_id = HPSA_MAX_LUN;
2332 sh->can_queue = h->nr_cmds;
2333 sh->cmd_per_lun = h->nr_cmds;
2334 sh->sg_tablesize = h->maxsgentries;
2335 h->scsi_host = sh;
2336 sh->hostdata[0] = (unsigned long) h;
2337 sh->irq = h->intr[h->intr_mode];
2338 sh->unique_id = sh->irq;
2339 error = scsi_add_host(sh, &h->pdev->dev);
2340 if (error)
2341 goto fail_host_put;
2342 scsi_scan_host(sh);
2343 return 0;
2344
2345 fail_host_put:
2346 dev_err(&h->pdev->dev, "%s: scsi_add_host"
2347 " failed for controller %d\n", __func__, h->ctlr);
2348 scsi_host_put(sh);
2349 return error;
2350 fail:
2351 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
2352 " failed for controller %d\n", __func__, h->ctlr);
2353 return -ENOMEM;
2354}
2355
2356static int wait_for_device_to_become_ready(struct ctlr_info *h,
2357 unsigned char lunaddr[])
2358{
2359 int rc = 0;
2360 int count = 0;
2361 int waittime = 1;
2362 struct CommandList *c;
2363
2364 c = cmd_special_alloc(h);
2365 if (!c) {
2366 dev_warn(&h->pdev->dev, "out of memory in "
2367 "wait_for_device_to_become_ready.\n");
2368 return IO_ERROR;
2369 }
2370
2371
2372 while (count < HPSA_TUR_RETRY_LIMIT) {
2373
2374
2375
2376
2377 msleep(1000 * waittime);
2378 count++;
2379
2380
2381 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2382 waittime = waittime * 2;
2383
2384
2385 (void) fill_cmd(c, TEST_UNIT_READY, h,
2386 NULL, 0, 0, lunaddr, TYPE_CMD);
2387 hpsa_scsi_do_simple_cmd_core(h, c);
2388
2389
2390 if (c->err_info->CommandStatus == CMD_SUCCESS)
2391 break;
2392
2393 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2394 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2395 (c->err_info->SenseInfo[2] == NO_SENSE ||
2396 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2397 break;
2398
2399 dev_warn(&h->pdev->dev, "waiting %d secs "
2400 "for device to become ready.\n", waittime);
2401 rc = 1;
2402 }
2403
2404 if (rc)
2405 dev_warn(&h->pdev->dev, "giving up on device.\n");
2406 else
2407 dev_warn(&h->pdev->dev, "device is ready.\n");
2408
2409 cmd_special_free(h, c);
2410 return rc;
2411}
2412
2413
2414
2415
2416static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2417{
2418 int rc;
2419 struct ctlr_info *h;
2420 struct hpsa_scsi_dev_t *dev;
2421
2422
2423 h = sdev_to_hba(scsicmd->device);
2424 if (h == NULL)
2425 return FAILED;
2426 dev = scsicmd->device->hostdata;
2427 if (!dev) {
2428 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2429 "device lookup failed.\n");
2430 return FAILED;
2431 }
2432 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2433 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2434
2435 rc = hpsa_send_reset(h, dev->scsi3addr);
2436 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2437 return SUCCESS;
2438
2439 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2440 return FAILED;
2441}
2442
2443static void swizzle_abort_tag(u8 *tag)
2444{
2445 u8 original_tag[8];
2446
2447 memcpy(original_tag, tag, 8);
2448 tag[0] = original_tag[3];
2449 tag[1] = original_tag[2];
2450 tag[2] = original_tag[1];
2451 tag[3] = original_tag[0];
2452 tag[4] = original_tag[7];
2453 tag[5] = original_tag[6];
2454 tag[6] = original_tag[5];
2455 tag[7] = original_tag[4];
2456}
2457
2458static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2459 struct CommandList *abort, int swizzle)
2460{
2461 int rc = IO_OK;
2462 struct CommandList *c;
2463 struct ErrorInfo *ei;
2464
2465 c = cmd_special_alloc(h);
2466 if (c == NULL) {
2467 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2468 return -ENOMEM;
2469 }
2470
2471
2472 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
2473 0, 0, scsi3addr, TYPE_MSG);
2474 if (swizzle)
2475 swizzle_abort_tag(&c->Request.CDB[4]);
2476 hpsa_scsi_do_simple_cmd_core(h, c);
2477 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
2478 __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
2479
2480
2481 ei = c->err_info;
2482 switch (ei->CommandStatus) {
2483 case CMD_SUCCESS:
2484 break;
2485 case CMD_UNABORTABLE:
2486 rc = -1;
2487 break;
2488 default:
2489 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
2490 __func__, abort->Header.Tag.upper,
2491 abort->Header.Tag.lower);
2492 hpsa_scsi_interpret_error(c);
2493 rc = -1;
2494 break;
2495 }
2496 cmd_special_free(h, c);
2497 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
2498 abort->Header.Tag.upper, abort->Header.Tag.lower);
2499 return rc;
2500}
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
2515 struct scsi_cmnd *find, struct list_head *queue_head)
2516{
2517 unsigned long flags;
2518 struct CommandList *c = NULL;
2519
2520 if (!find)
2521 return 0;
2522 spin_lock_irqsave(&h->lock, flags);
2523 list_for_each_entry(c, queue_head, list) {
2524 if (c->scsi_cmd == NULL)
2525 continue;
2526 if (c->scsi_cmd == find) {
2527 spin_unlock_irqrestore(&h->lock, flags);
2528 return c;
2529 }
2530 }
2531 spin_unlock_irqrestore(&h->lock, flags);
2532 return NULL;
2533}
2534
2535static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
2536 u8 *tag, struct list_head *queue_head)
2537{
2538 unsigned long flags;
2539 struct CommandList *c;
2540
2541 spin_lock_irqsave(&h->lock, flags);
2542 list_for_each_entry(c, queue_head, list) {
2543 if (memcmp(&c->Header.Tag, tag, 8) != 0)
2544 continue;
2545 spin_unlock_irqrestore(&h->lock, flags);
2546 return c;
2547 }
2548 spin_unlock_irqrestore(&h->lock, flags);
2549 return NULL;
2550}
2551
2552
2553
2554
2555
2556
2557
2558static int hpsa_send_abort_both_ways(struct ctlr_info *h,
2559 unsigned char *scsi3addr, struct CommandList *abort)
2560{
2561 u8 swizzled_tag[8];
2562 struct CommandList *c;
2563 int rc = 0, rc2 = 0;
2564
2565
2566
2567
2568
2569 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
2570 swizzle_abort_tag(swizzled_tag);
2571 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
2572 if (c != NULL) {
2573 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
2574 return hpsa_send_abort(h, scsi3addr, abort, 0);
2575 }
2576 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
2577
2578
2579
2580
2581
2582 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
2583 if (c)
2584 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
2585 return rc && rc2;
2586}
2587
2588
2589
2590
2591
2592static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2593{
2594
2595 int i, rc;
2596 struct ctlr_info *h;
2597 struct hpsa_scsi_dev_t *dev;
2598 struct CommandList *abort;
2599 struct CommandList *found;
2600 struct scsi_cmnd *as;
2601 char msg[256];
2602 int ml = 0;
2603
2604
2605 h = sdev_to_hba(sc->device);
2606 if (WARN(h == NULL,
2607 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
2608 return FAILED;
2609
2610
2611 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
2612 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
2613 return FAILED;
2614
2615 memset(msg, 0, sizeof(msg));
2616 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
2617 h->scsi_host->host_no, sc->device->channel,
2618 sc->device->id, sc->device->lun);
2619
2620
2621 dev = sc->device->hostdata;
2622 if (!dev) {
2623 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
2624 msg);
2625 return FAILED;
2626 }
2627
2628
2629 abort = (struct CommandList *) sc->host_scribble;
2630 if (abort == NULL) {
2631 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
2632 msg);
2633 return FAILED;
2634 }
2635
2636 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
2637 abort->Header.Tag.upper, abort->Header.Tag.lower);
2638 as = (struct scsi_cmnd *) abort->scsi_cmd;
2639 if (as != NULL)
2640 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
2641 as->cmnd[0], as->serial_number);
2642 dev_dbg(&h->pdev->dev, "%s\n", msg);
2643 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
2644 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2645
2646
2647
2648
2649
2650 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
2651 if (found) {
2652 found->err_info->CommandStatus = CMD_ABORTED;
2653 finish_cmd(found);
2654 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
2655 msg);
2656 return SUCCESS;
2657 }
2658
2659
2660 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2661 if (!found) {
2662 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
2663 msg);
2664 return SUCCESS;
2665 }
2666
2667
2668
2669
2670
2671
2672 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
2673 if (rc != 0) {
2674 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
2675 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
2676 h->scsi_host->host_no,
2677 dev->bus, dev->target, dev->lun);
2678 return FAILED;
2679 }
2680 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
2681
2682
2683
2684
2685
2686
2687#define ABORT_COMPLETE_WAIT_SECS 30
2688 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
2689 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2690 if (!found)
2691 return SUCCESS;
2692 msleep(100);
2693 }
2694 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
2695 msg, ABORT_COMPLETE_WAIT_SECS);
2696 return FAILED;
2697}
2698
2699
2700
2701
2702
2703
2704
2705
2706static struct CommandList *cmd_alloc(struct ctlr_info *h)
2707{
2708 struct CommandList *c;
2709 int i;
2710 union u64bit temp64;
2711 dma_addr_t cmd_dma_handle, err_dma_handle;
2712 unsigned long flags;
2713
2714 spin_lock_irqsave(&h->lock, flags);
2715 do {
2716 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2717 if (i == h->nr_cmds) {
2718 spin_unlock_irqrestore(&h->lock, flags);
2719 return NULL;
2720 }
2721 } while (test_and_set_bit
2722 (i & (BITS_PER_LONG - 1),
2723 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2724 h->nr_allocs++;
2725 spin_unlock_irqrestore(&h->lock, flags);
2726
2727 c = h->cmd_pool + i;
2728 memset(c, 0, sizeof(*c));
2729 cmd_dma_handle = h->cmd_pool_dhandle
2730 + i * sizeof(*c);
2731 c->err_info = h->errinfo_pool + i;
2732 memset(c->err_info, 0, sizeof(*c->err_info));
2733 err_dma_handle = h->errinfo_pool_dhandle
2734 + i * sizeof(*c->err_info);
2735
2736 c->cmdindex = i;
2737
2738 INIT_LIST_HEAD(&c->list);
2739 c->busaddr = (u32) cmd_dma_handle;
2740 temp64.val = (u64) err_dma_handle;
2741 c->ErrDesc.Addr.lower = temp64.val32.lower;
2742 c->ErrDesc.Addr.upper = temp64.val32.upper;
2743 c->ErrDesc.Len = sizeof(*c->err_info);
2744
2745 c->h = h;
2746 return c;
2747}
2748
2749
2750
2751
2752
2753static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2754{
2755 struct CommandList *c;
2756 union u64bit temp64;
2757 dma_addr_t cmd_dma_handle, err_dma_handle;
2758
2759 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2760 if (c == NULL)
2761 return NULL;
2762 memset(c, 0, sizeof(*c));
2763
2764 c->cmdindex = -1;
2765
2766 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2767 &err_dma_handle);
2768
2769 if (c->err_info == NULL) {
2770 pci_free_consistent(h->pdev,
2771 sizeof(*c), c, cmd_dma_handle);
2772 return NULL;
2773 }
2774 memset(c->err_info, 0, sizeof(*c->err_info));
2775
2776 INIT_LIST_HEAD(&c->list);
2777 c->busaddr = (u32) cmd_dma_handle;
2778 temp64.val = (u64) err_dma_handle;
2779 c->ErrDesc.Addr.lower = temp64.val32.lower;
2780 c->ErrDesc.Addr.upper = temp64.val32.upper;
2781 c->ErrDesc.Len = sizeof(*c->err_info);
2782
2783 c->h = h;
2784 return c;
2785}
2786
2787static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2788{
2789 int i;
2790 unsigned long flags;
2791
2792 i = c - h->cmd_pool;
2793 spin_lock_irqsave(&h->lock, flags);
2794 clear_bit(i & (BITS_PER_LONG - 1),
2795 h->cmd_pool_bits + (i / BITS_PER_LONG));
2796 h->nr_frees++;
2797 spin_unlock_irqrestore(&h->lock, flags);
2798}
2799
2800static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2801{
2802 union u64bit temp64;
2803
2804 temp64.val32.lower = c->ErrDesc.Addr.lower;
2805 temp64.val32.upper = c->ErrDesc.Addr.upper;
2806 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2807 c->err_info, (dma_addr_t) temp64.val);
2808 pci_free_consistent(h->pdev, sizeof(*c),
2809 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2810}
2811
2812#ifdef CONFIG_COMPAT
2813
2814static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2815{
2816 IOCTL32_Command_struct __user *arg32 =
2817 (IOCTL32_Command_struct __user *) arg;
2818 IOCTL_Command_struct arg64;
2819 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2820 int err;
2821 u32 cp;
2822
2823 memset(&arg64, 0, sizeof(arg64));
2824 err = 0;
2825 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2826 sizeof(arg64.LUN_info));
2827 err |= copy_from_user(&arg64.Request, &arg32->Request,
2828 sizeof(arg64.Request));
2829 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2830 sizeof(arg64.error_info));
2831 err |= get_user(arg64.buf_size, &arg32->buf_size);
2832 err |= get_user(cp, &arg32->buf);
2833 arg64.buf = compat_ptr(cp);
2834 err |= copy_to_user(p, &arg64, sizeof(arg64));
2835
2836 if (err)
2837 return -EFAULT;
2838
2839 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2840 if (err)
2841 return err;
2842 err |= copy_in_user(&arg32->error_info, &p->error_info,
2843 sizeof(arg32->error_info));
2844 if (err)
2845 return -EFAULT;
2846 return err;
2847}
2848
2849static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2850 int cmd, void *arg)
2851{
2852 BIG_IOCTL32_Command_struct __user *arg32 =
2853 (BIG_IOCTL32_Command_struct __user *) arg;
2854 BIG_IOCTL_Command_struct arg64;
2855 BIG_IOCTL_Command_struct __user *p =
2856 compat_alloc_user_space(sizeof(arg64));
2857 int err;
2858 u32 cp;
2859
2860 memset(&arg64, 0, sizeof(arg64));
2861 err = 0;
2862 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2863 sizeof(arg64.LUN_info));
2864 err |= copy_from_user(&arg64.Request, &arg32->Request,
2865 sizeof(arg64.Request));
2866 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2867 sizeof(arg64.error_info));
2868 err |= get_user(arg64.buf_size, &arg32->buf_size);
2869 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2870 err |= get_user(cp, &arg32->buf);
2871 arg64.buf = compat_ptr(cp);
2872 err |= copy_to_user(p, &arg64, sizeof(arg64));
2873
2874 if (err)
2875 return -EFAULT;
2876
2877 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2878 if (err)
2879 return err;
2880 err |= copy_in_user(&arg32->error_info, &p->error_info,
2881 sizeof(arg32->error_info));
2882 if (err)
2883 return -EFAULT;
2884 return err;
2885}
2886
2887static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2888{
2889 switch (cmd) {
2890 case CCISS_GETPCIINFO:
2891 case CCISS_GETINTINFO:
2892 case CCISS_SETINTINFO:
2893 case CCISS_GETNODENAME:
2894 case CCISS_SETNODENAME:
2895 case CCISS_GETHEARTBEAT:
2896 case CCISS_GETBUSTYPES:
2897 case CCISS_GETFIRMVER:
2898 case CCISS_GETDRIVVER:
2899 case CCISS_REVALIDVOLS:
2900 case CCISS_DEREGDISK:
2901 case CCISS_REGNEWDISK:
2902 case CCISS_REGNEWD:
2903 case CCISS_RESCANDISK:
2904 case CCISS_GETLUNINFO:
2905 return hpsa_ioctl(dev, cmd, arg);
2906
2907 case CCISS_PASSTHRU32:
2908 return hpsa_ioctl32_passthru(dev, cmd, arg);
2909 case CCISS_BIG_PASSTHRU32:
2910 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2911
2912 default:
2913 return -ENOIOCTLCMD;
2914 }
2915}
2916#endif
2917
2918static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2919{
2920 struct hpsa_pci_info pciinfo;
2921
2922 if (!argp)
2923 return -EINVAL;
2924 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2925 pciinfo.bus = h->pdev->bus->number;
2926 pciinfo.dev_fn = h->pdev->devfn;
2927 pciinfo.board_id = h->board_id;
2928 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2929 return -EFAULT;
2930 return 0;
2931}
2932
2933static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2934{
2935 DriverVer_type DriverVer;
2936 unsigned char vmaj, vmin, vsubmin;
2937 int rc;
2938
2939 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2940 &vmaj, &vmin, &vsubmin);
2941 if (rc != 3) {
2942 dev_info(&h->pdev->dev, "driver version string '%s' "
2943 "unrecognized.", HPSA_DRIVER_VERSION);
2944 vmaj = 0;
2945 vmin = 0;
2946 vsubmin = 0;
2947 }
2948 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2949 if (!argp)
2950 return -EINVAL;
2951 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2952 return -EFAULT;
2953 return 0;
2954}
2955
2956static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2957{
2958 IOCTL_Command_struct iocommand;
2959 struct CommandList *c;
2960 char *buff = NULL;
2961 union u64bit temp64;
2962 int rc = 0;
2963
2964 if (!argp)
2965 return -EINVAL;
2966 if (!capable(CAP_SYS_RAWIO))
2967 return -EPERM;
2968 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2969 return -EFAULT;
2970 if ((iocommand.buf_size < 1) &&
2971 (iocommand.Request.Type.Direction != XFER_NONE)) {
2972 return -EINVAL;
2973 }
2974 if (iocommand.buf_size > 0) {
2975 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2976 if (buff == NULL)
2977 return -EFAULT;
2978 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2979
2980 if (copy_from_user(buff, iocommand.buf,
2981 iocommand.buf_size)) {
2982 rc = -EFAULT;
2983 goto out_kfree;
2984 }
2985 } else {
2986 memset(buff, 0, iocommand.buf_size);
2987 }
2988 }
2989 c = cmd_special_alloc(h);
2990 if (c == NULL) {
2991 rc = -ENOMEM;
2992 goto out_kfree;
2993 }
2994
2995 c->cmd_type = CMD_IOCTL_PEND;
2996
2997 c->Header.ReplyQueue = 0;
2998 if (iocommand.buf_size > 0) {
2999 c->Header.SGList = 1;
3000 c->Header.SGTotal = 1;
3001 } else {
3002 c->Header.SGList = 0;
3003 c->Header.SGTotal = 0;
3004 }
3005 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
3006
3007 c->Header.Tag.lower = c->busaddr;
3008
3009
3010 memcpy(&c->Request, &iocommand.Request,
3011 sizeof(c->Request));
3012
3013
3014 if (iocommand.buf_size > 0) {
3015 temp64.val = pci_map_single(h->pdev, buff,
3016 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
3017 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
3018 c->SG[0].Addr.lower = 0;
3019 c->SG[0].Addr.upper = 0;
3020 c->SG[0].Len = 0;
3021 rc = -ENOMEM;
3022 goto out;
3023 }
3024 c->SG[0].Addr.lower = temp64.val32.lower;
3025 c->SG[0].Addr.upper = temp64.val32.upper;
3026 c->SG[0].Len = iocommand.buf_size;
3027 c->SG[0].Ext = 0;
3028 }
3029 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3030 if (iocommand.buf_size > 0)
3031 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
3032 check_ioctl_unit_attention(h, c);
3033
3034
3035 memcpy(&iocommand.error_info, c->err_info,
3036 sizeof(iocommand.error_info));
3037 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
3038 rc = -EFAULT;
3039 goto out;
3040 }
3041 if (iocommand.Request.Type.Direction == XFER_READ &&
3042 iocommand.buf_size > 0) {
3043
3044 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
3045 rc = -EFAULT;
3046 goto out;
3047 }
3048 }
3049out:
3050 cmd_special_free(h, c);
3051out_kfree:
3052 kfree(buff);
3053 return rc;
3054}
3055
3056static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
3057{
3058 BIG_IOCTL_Command_struct *ioc;
3059 struct CommandList *c;
3060 unsigned char **buff = NULL;
3061 int *buff_size = NULL;
3062 union u64bit temp64;
3063 BYTE sg_used = 0;
3064 int status = 0;
3065 int i;
3066 u32 left;
3067 u32 sz;
3068 BYTE __user *data_ptr;
3069
3070 if (!argp)
3071 return -EINVAL;
3072 if (!capable(CAP_SYS_RAWIO))
3073 return -EPERM;
3074 ioc = (BIG_IOCTL_Command_struct *)
3075 kmalloc(sizeof(*ioc), GFP_KERNEL);
3076 if (!ioc) {
3077 status = -ENOMEM;
3078 goto cleanup1;
3079 }
3080 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
3081 status = -EFAULT;
3082 goto cleanup1;
3083 }
3084 if ((ioc->buf_size < 1) &&
3085 (ioc->Request.Type.Direction != XFER_NONE)) {
3086 status = -EINVAL;
3087 goto cleanup1;
3088 }
3089
3090 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
3091 status = -EINVAL;
3092 goto cleanup1;
3093 }
3094 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
3095 status = -EINVAL;
3096 goto cleanup1;
3097 }
3098 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
3099 if (!buff) {
3100 status = -ENOMEM;
3101 goto cleanup1;
3102 }
3103 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
3104 if (!buff_size) {
3105 status = -ENOMEM;
3106 goto cleanup1;
3107 }
3108 left = ioc->buf_size;
3109 data_ptr = ioc->buf;
3110 while (left) {
3111 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
3112 buff_size[sg_used] = sz;
3113 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
3114 if (buff[sg_used] == NULL) {
3115 status = -ENOMEM;
3116 goto cleanup1;
3117 }
3118 if (ioc->Request.Type.Direction == XFER_WRITE) {
3119 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
3120 status = -ENOMEM;
3121 goto cleanup1;
3122 }
3123 } else
3124 memset(buff[sg_used], 0, sz);
3125 left -= sz;
3126 data_ptr += sz;
3127 sg_used++;
3128 }
3129 c = cmd_special_alloc(h);
3130 if (c == NULL) {
3131 status = -ENOMEM;
3132 goto cleanup1;
3133 }
3134 c->cmd_type = CMD_IOCTL_PEND;
3135 c->Header.ReplyQueue = 0;
3136 c->Header.SGList = c->Header.SGTotal = sg_used;
3137 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
3138 c->Header.Tag.lower = c->busaddr;
3139 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
3140 if (ioc->buf_size > 0) {
3141 int i;
3142 for (i = 0; i < sg_used; i++) {
3143 temp64.val = pci_map_single(h->pdev, buff[i],
3144 buff_size[i], PCI_DMA_BIDIRECTIONAL);
3145 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
3146 c->SG[i].Addr.lower = 0;
3147 c->SG[i].Addr.upper = 0;
3148 c->SG[i].Len = 0;
3149 hpsa_pci_unmap(h->pdev, c, i,
3150 PCI_DMA_BIDIRECTIONAL);
3151 status = -ENOMEM;
3152 goto cleanup1;
3153 }
3154 c->SG[i].Addr.lower = temp64.val32.lower;
3155 c->SG[i].Addr.upper = temp64.val32.upper;
3156 c->SG[i].Len = buff_size[i];
3157
3158 c->SG[i].Ext = 0;
3159 }
3160 }
3161 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3162 if (sg_used)
3163 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
3164 check_ioctl_unit_attention(h, c);
3165
3166 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
3167 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
3168 cmd_special_free(h, c);
3169 status = -EFAULT;
3170 goto cleanup1;
3171 }
3172 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
3173
3174 BYTE __user *ptr = ioc->buf;
3175 for (i = 0; i < sg_used; i++) {
3176 if (copy_to_user(ptr, buff[i], buff_size[i])) {
3177 cmd_special_free(h, c);
3178 status = -EFAULT;
3179 goto cleanup1;
3180 }
3181 ptr += buff_size[i];
3182 }
3183 }
3184 cmd_special_free(h, c);
3185 status = 0;
3186cleanup1:
3187 if (buff) {
3188 for (i = 0; i < sg_used; i++)
3189 kfree(buff[i]);
3190 kfree(buff);
3191 }
3192 kfree(buff_size);
3193 kfree(ioc);
3194 return status;
3195}
3196
3197static void check_ioctl_unit_attention(struct ctlr_info *h,
3198 struct CommandList *c)
3199{
3200 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
3201 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
3202 (void) check_for_unit_attention(h, c);
3203}
3204
3205
3206
3207static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
3208{
3209 struct ctlr_info *h;
3210 void __user *argp = (void __user *)arg;
3211
3212 h = sdev_to_hba(dev);
3213
3214 switch (cmd) {
3215 case CCISS_DEREGDISK:
3216 case CCISS_REGNEWDISK:
3217 case CCISS_REGNEWD:
3218 hpsa_scan_start(h->scsi_host);
3219 return 0;
3220 case CCISS_GETPCIINFO:
3221 return hpsa_getpciinfo_ioctl(h, argp);
3222 case CCISS_GETDRIVVER:
3223 return hpsa_getdrivver_ioctl(h, argp);
3224 case CCISS_PASSTHRU:
3225 return hpsa_passthru_ioctl(h, argp);
3226 case CCISS_BIG_PASSTHRU:
3227 return hpsa_big_passthru_ioctl(h, argp);
3228 default:
3229 return -ENOTTY;
3230 }
3231}
3232
3233static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
3234 u8 reset_type)
3235{
3236 struct CommandList *c;
3237
3238 c = cmd_alloc(h);
3239 if (!c)
3240 return -ENOMEM;
3241
3242 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
3243 RAID_CTLR_LUNID, TYPE_MSG);
3244 c->Request.CDB[1] = reset_type;
3245 c->waiting = NULL;
3246 enqueue_cmd_and_start_io(h, c);
3247
3248
3249
3250
3251 return 0;
3252}
3253
3254static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3255 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
3256 int cmd_type)
3257{
3258 int pci_dir = XFER_NONE;
3259 struct CommandList *a;
3260
3261 c->cmd_type = CMD_IOCTL_PEND;
3262 c->Header.ReplyQueue = 0;
3263 if (buff != NULL && size > 0) {
3264 c->Header.SGList = 1;
3265 c->Header.SGTotal = 1;
3266 } else {
3267 c->Header.SGList = 0;
3268 c->Header.SGTotal = 0;
3269 }
3270 c->Header.Tag.lower = c->busaddr;
3271 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
3272
3273 c->Request.Type.Type = cmd_type;
3274 if (cmd_type == TYPE_CMD) {
3275 switch (cmd) {
3276 case HPSA_INQUIRY:
3277
3278 if (page_code != 0) {
3279 c->Request.CDB[1] = 0x01;
3280 c->Request.CDB[2] = page_code;
3281 }
3282 c->Request.CDBLen = 6;
3283 c->Request.Type.Attribute = ATTR_SIMPLE;
3284 c->Request.Type.Direction = XFER_READ;
3285 c->Request.Timeout = 0;
3286 c->Request.CDB[0] = HPSA_INQUIRY;
3287 c->Request.CDB[4] = size & 0xFF;
3288 break;
3289 case HPSA_REPORT_LOG:
3290 case HPSA_REPORT_PHYS:
3291
3292
3293
3294 c->Request.CDBLen = 12;
3295 c->Request.Type.Attribute = ATTR_SIMPLE;
3296 c->Request.Type.Direction = XFER_READ;
3297 c->Request.Timeout = 0;
3298 c->Request.CDB[0] = cmd;
3299 c->Request.CDB[6] = (size >> 24) & 0xFF;
3300 c->Request.CDB[7] = (size >> 16) & 0xFF;
3301 c->Request.CDB[8] = (size >> 8) & 0xFF;
3302 c->Request.CDB[9] = size & 0xFF;
3303 break;
3304 case HPSA_CACHE_FLUSH:
3305 c->Request.CDBLen = 12;
3306 c->Request.Type.Attribute = ATTR_SIMPLE;
3307 c->Request.Type.Direction = XFER_WRITE;
3308 c->Request.Timeout = 0;
3309 c->Request.CDB[0] = BMIC_WRITE;
3310 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
3311 c->Request.CDB[7] = (size >> 8) & 0xFF;
3312 c->Request.CDB[8] = size & 0xFF;
3313 break;
3314 case TEST_UNIT_READY:
3315 c->Request.CDBLen = 6;
3316 c->Request.Type.Attribute = ATTR_SIMPLE;
3317 c->Request.Type.Direction = XFER_NONE;
3318 c->Request.Timeout = 0;
3319 break;
3320 default:
3321 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
3322 BUG();
3323 return -1;
3324 }
3325 } else if (cmd_type == TYPE_MSG) {
3326 switch (cmd) {
3327
3328 case HPSA_DEVICE_RESET_MSG:
3329 c->Request.CDBLen = 16;
3330 c->Request.Type.Type = 1;
3331 c->Request.Type.Attribute = ATTR_SIMPLE;
3332 c->Request.Type.Direction = XFER_NONE;
3333 c->Request.Timeout = 0;
3334 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
3335 c->Request.CDB[0] = cmd;
3336 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
3337
3338
3339 c->Request.CDB[4] = 0x00;
3340 c->Request.CDB[5] = 0x00;
3341 c->Request.CDB[6] = 0x00;
3342 c->Request.CDB[7] = 0x00;
3343 break;
3344 case HPSA_ABORT_MSG:
3345 a = buff;
3346 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
3347 a->Header.Tag.upper, a->Header.Tag.lower,
3348 c->Header.Tag.upper, c->Header.Tag.lower);
3349 c->Request.CDBLen = 16;
3350 c->Request.Type.Type = TYPE_MSG;
3351 c->Request.Type.Attribute = ATTR_SIMPLE;
3352 c->Request.Type.Direction = XFER_WRITE;
3353 c->Request.Timeout = 0;
3354 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
3355 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
3356 c->Request.CDB[2] = 0x00;
3357 c->Request.CDB[3] = 0x00;
3358
3359 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
3360 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
3361 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
3362 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
3363 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
3364 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
3365 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
3366 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
3367 c->Request.CDB[12] = 0x00;
3368 c->Request.CDB[13] = 0x00;
3369 c->Request.CDB[14] = 0x00;
3370 c->Request.CDB[15] = 0x00;
3371 break;
3372 default:
3373 dev_warn(&h->pdev->dev, "unknown message type %d\n",
3374 cmd);
3375 BUG();
3376 }
3377 } else {
3378 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
3379 BUG();
3380 }
3381
3382 switch (c->Request.Type.Direction) {
3383 case XFER_READ:
3384 pci_dir = PCI_DMA_FROMDEVICE;
3385 break;
3386 case XFER_WRITE:
3387 pci_dir = PCI_DMA_TODEVICE;
3388 break;
3389 case XFER_NONE:
3390 pci_dir = PCI_DMA_NONE;
3391 break;
3392 default:
3393 pci_dir = PCI_DMA_BIDIRECTIONAL;
3394 }
3395 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
3396 return -1;
3397 return 0;
3398}
3399
3400
3401
3402
3403static void __iomem *remap_pci_mem(ulong base, ulong size)
3404{
3405 ulong page_base = ((ulong) base) & PAGE_MASK;
3406 ulong page_offs = ((ulong) base) - page_base;
3407 void __iomem *page_remapped = ioremap_nocache(page_base,
3408 page_offs + size);
3409
3410 return page_remapped ? (page_remapped + page_offs) : NULL;
3411}
3412
3413
3414
3415
3416static void start_io(struct ctlr_info *h)
3417{
3418 struct CommandList *c;
3419 unsigned long flags;
3420
3421 spin_lock_irqsave(&h->lock, flags);
3422 while (!list_empty(&h->reqQ)) {
3423 c = list_entry(h->reqQ.next, struct CommandList, list);
3424
3425 if ((h->access.fifo_full(h))) {
3426 dev_warn(&h->pdev->dev, "fifo full\n");
3427 break;
3428 }
3429
3430
3431 removeQ(c);
3432 h->Qdepth--;
3433
3434
3435 addQ(&h->cmpQ, c);
3436
3437
3438
3439
3440
3441 h->commands_outstanding++;
3442 if (h->commands_outstanding > h->max_outstanding)
3443 h->max_outstanding = h->commands_outstanding;
3444
3445
3446 spin_unlock_irqrestore(&h->lock, flags);
3447 h->access.submit_command(h, c);
3448 spin_lock_irqsave(&h->lock, flags);
3449 }
3450 spin_unlock_irqrestore(&h->lock, flags);
3451}
3452
3453static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
3454{
3455 return h->access.command_completed(h, q);
3456}
3457
3458static inline bool interrupt_pending(struct ctlr_info *h)
3459{
3460 return h->access.intr_pending(h);
3461}
3462
3463static inline long interrupt_not_for_us(struct ctlr_info *h)
3464{
3465 return (h->access.intr_pending(h) == 0) ||
3466 (h->interrupts_enabled == 0);
3467}
3468
3469static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3470 u32 raw_tag)
3471{
3472 if (unlikely(tag_index >= h->nr_cmds)) {
3473 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3474 return 1;
3475 }
3476 return 0;
3477}
3478
3479static inline void finish_cmd(struct CommandList *c)
3480{
3481 unsigned long flags;
3482
3483 spin_lock_irqsave(&c->h->lock, flags);
3484 removeQ(c);
3485 spin_unlock_irqrestore(&c->h->lock, flags);
3486 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
3487 if (likely(c->cmd_type == CMD_SCSI))
3488 complete_scsi_command(c);
3489 else if (c->cmd_type == CMD_IOCTL_PEND)
3490 complete(c->waiting);
3491}
3492
3493static inline u32 hpsa_tag_contains_index(u32 tag)
3494{
3495 return tag & DIRECT_LOOKUP_BIT;
3496}
3497
3498static inline u32 hpsa_tag_to_index(u32 tag)
3499{
3500 return tag >> DIRECT_LOOKUP_SHIFT;
3501}
3502
3503
3504static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3505{
3506#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3507#define HPSA_SIMPLE_ERROR_BITS 0x03
3508 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3509 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3510 return tag & ~HPSA_PERF_ERROR_BITS;
3511}
3512
3513
3514static inline void process_indexed_cmd(struct ctlr_info *h,
3515 u32 raw_tag)
3516{
3517 u32 tag_index;
3518 struct CommandList *c;
3519
3520 tag_index = hpsa_tag_to_index(raw_tag);
3521 if (!bad_tag(h, tag_index, raw_tag)) {
3522 c = h->cmd_pool + tag_index;
3523 finish_cmd(c);
3524 }
3525}
3526
3527
3528static inline void process_nonindexed_cmd(struct ctlr_info *h,
3529 u32 raw_tag)
3530{
3531 u32 tag;
3532 struct CommandList *c = NULL;
3533 unsigned long flags;
3534
3535 tag = hpsa_tag_discard_error_bits(h, raw_tag);
3536 spin_lock_irqsave(&h->lock, flags);
3537 list_for_each_entry(c, &h->cmpQ, list) {
3538 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3539 spin_unlock_irqrestore(&h->lock, flags);
3540 finish_cmd(c);
3541 return;
3542 }
3543 }
3544 spin_unlock_irqrestore(&h->lock, flags);
3545 bad_tag(h, h->nr_cmds + 1, raw_tag);
3546}
3547
3548
3549
3550
3551
3552
3553static int ignore_bogus_interrupt(struct ctlr_info *h)
3554{
3555 if (likely(!reset_devices))
3556 return 0;
3557
3558 if (likely(h->interrupts_enabled))
3559 return 0;
3560
3561 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3562 "(known firmware bug.) Ignoring.\n");
3563
3564 return 1;
3565}
3566
3567
3568
3569
3570
3571
3572static struct ctlr_info *queue_to_hba(u8 *queue)
3573{
3574 return container_of((queue - *queue), struct ctlr_info, q[0]);
3575}
3576
3577static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
3578{
3579 struct ctlr_info *h = queue_to_hba(queue);
3580 u8 q = *(u8 *) queue;
3581 u32 raw_tag;
3582
3583 if (ignore_bogus_interrupt(h))
3584 return IRQ_NONE;
3585
3586 if (interrupt_not_for_us(h))
3587 return IRQ_NONE;
3588 h->last_intr_timestamp = get_jiffies_64();
3589 while (interrupt_pending(h)) {
3590 raw_tag = get_next_completion(h, q);
3591 while (raw_tag != FIFO_EMPTY)
3592 raw_tag = next_command(h, q);
3593 }
3594 return IRQ_HANDLED;
3595}
3596
3597static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
3598{
3599 struct ctlr_info *h = queue_to_hba(queue);
3600 u32 raw_tag;
3601 u8 q = *(u8 *) queue;
3602
3603 if (ignore_bogus_interrupt(h))
3604 return IRQ_NONE;
3605
3606 h->last_intr_timestamp = get_jiffies_64();
3607 raw_tag = get_next_completion(h, q);
3608 while (raw_tag != FIFO_EMPTY)
3609 raw_tag = next_command(h, q);
3610 return IRQ_HANDLED;
3611}
3612
3613static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
3614{
3615 struct ctlr_info *h = queue_to_hba((u8 *) queue);
3616 u32 raw_tag;
3617 u8 q = *(u8 *) queue;
3618
3619 if (interrupt_not_for_us(h))
3620 return IRQ_NONE;
3621 h->last_intr_timestamp = get_jiffies_64();
3622 while (interrupt_pending(h)) {
3623 raw_tag = get_next_completion(h, q);
3624 while (raw_tag != FIFO_EMPTY) {
3625 if (likely(hpsa_tag_contains_index(raw_tag)))
3626 process_indexed_cmd(h, raw_tag);
3627 else
3628 process_nonindexed_cmd(h, raw_tag);
3629 raw_tag = next_command(h, q);
3630 }
3631 }
3632 return IRQ_HANDLED;
3633}
3634
3635static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
3636{
3637 struct ctlr_info *h = queue_to_hba(queue);
3638 u32 raw_tag;
3639 u8 q = *(u8 *) queue;
3640
3641 h->last_intr_timestamp = get_jiffies_64();
3642 raw_tag = get_next_completion(h, q);
3643 while (raw_tag != FIFO_EMPTY) {
3644 if (likely(hpsa_tag_contains_index(raw_tag)))
3645 process_indexed_cmd(h, raw_tag);
3646 else
3647 process_nonindexed_cmd(h, raw_tag);
3648 raw_tag = next_command(h, q);
3649 }
3650 return IRQ_HANDLED;
3651}
3652
3653
3654
3655
3656
3657static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3658 unsigned char type)
3659{
3660 struct Command {
3661 struct CommandListHeader CommandHeader;
3662 struct RequestBlock Request;
3663 struct ErrDescriptor ErrorDescriptor;
3664 };
3665 struct Command *cmd;
3666 static const size_t cmd_sz = sizeof(*cmd) +
3667 sizeof(cmd->ErrorDescriptor);
3668 dma_addr_t paddr64;
3669 uint32_t paddr32, tag;
3670 void __iomem *vaddr;
3671 int i, err;
3672
3673 vaddr = pci_ioremap_bar(pdev, 0);
3674 if (vaddr == NULL)
3675 return -ENOMEM;
3676
3677
3678
3679
3680
3681 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3682 if (err) {
3683 iounmap(vaddr);
3684 return -ENOMEM;
3685 }
3686
3687 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3688 if (cmd == NULL) {
3689 iounmap(vaddr);
3690 return -ENOMEM;
3691 }
3692
3693
3694
3695
3696
3697 paddr32 = paddr64;
3698
3699 cmd->CommandHeader.ReplyQueue = 0;
3700 cmd->CommandHeader.SGList = 0;
3701 cmd->CommandHeader.SGTotal = 0;
3702 cmd->CommandHeader.Tag.lower = paddr32;
3703 cmd->CommandHeader.Tag.upper = 0;
3704 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3705
3706 cmd->Request.CDBLen = 16;
3707 cmd->Request.Type.Type = TYPE_MSG;
3708 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3709 cmd->Request.Type.Direction = XFER_NONE;
3710 cmd->Request.Timeout = 0;
3711 cmd->Request.CDB[0] = opcode;
3712 cmd->Request.CDB[1] = type;
3713 memset(&cmd->Request.CDB[2], 0, 14);
3714 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3715 cmd->ErrorDescriptor.Addr.upper = 0;
3716 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3717
3718 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3719
3720 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3721 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3722 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3723 break;
3724 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3725 }
3726
3727 iounmap(vaddr);
3728
3729
3730
3731
3732 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3733 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3734 opcode, type);
3735 return -ETIMEDOUT;
3736 }
3737
3738 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3739
3740 if (tag & HPSA_ERROR_BIT) {
3741 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3742 opcode, type);
3743 return -EIO;
3744 }
3745
3746 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3747 opcode, type);
3748 return 0;
3749}
3750
3751#define hpsa_noop(p) hpsa_message(p, 3, 0)
3752
3753static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3754 void * __iomem vaddr, u32 use_doorbell)
3755{
3756 u16 pmcsr;
3757 int pos;
3758
3759 if (use_doorbell) {
3760
3761
3762
3763
3764 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3765 writel(use_doorbell, vaddr + SA5_DOORBELL);
3766 } else {
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3777 if (pos == 0) {
3778 dev_err(&pdev->dev,
3779 "hpsa_reset_controller: "
3780 "PCI PM not supported\n");
3781 return -ENODEV;
3782 }
3783 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3784
3785 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3786 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3787 pmcsr |= PCI_D3hot;
3788 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3789
3790 msleep(500);
3791
3792
3793 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3794 pmcsr |= PCI_D0;
3795 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3796
3797
3798
3799
3800
3801
3802 msleep(500);
3803 }
3804 return 0;
3805}
3806
3807static void init_driver_version(char *driver_version, int len)
3808{
3809 memset(driver_version, 0, len);
3810 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
3811}
3812
3813static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
3814{
3815 char *driver_version;
3816 int i, size = sizeof(cfgtable->driver_version);
3817
3818 driver_version = kmalloc(size, GFP_KERNEL);
3819 if (!driver_version)
3820 return -ENOMEM;
3821
3822 init_driver_version(driver_version, size);
3823 for (i = 0; i < size; i++)
3824 writeb(driver_version[i], &cfgtable->driver_version[i]);
3825 kfree(driver_version);
3826 return 0;
3827}
3828
3829static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
3830 unsigned char *driver_ver)
3831{
3832 int i;
3833
3834 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3835 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3836}
3837
3838static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
3839{
3840
3841 char *driver_ver, *old_driver_ver;
3842 int rc, size = sizeof(cfgtable->driver_version);
3843
3844 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3845 if (!old_driver_ver)
3846 return -ENOMEM;
3847 driver_ver = old_driver_ver + size;
3848
3849
3850
3851
3852 init_driver_version(old_driver_ver, size);
3853 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3854 rc = !memcmp(driver_ver, old_driver_ver, size);
3855 kfree(old_driver_ver);
3856 return rc;
3857}
3858
3859
3860
3861static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3862{
3863 u64 cfg_offset;
3864 u32 cfg_base_addr;
3865 u64 cfg_base_addr_index;
3866 void __iomem *vaddr;
3867 unsigned long paddr;
3868 u32 misc_fw_support;
3869 int rc;
3870 struct CfgTable __iomem *cfgtable;
3871 u32 use_doorbell;
3872 u32 board_id;
3873 u16 command_register;
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888 rc = hpsa_lookup_board_id(pdev, &board_id);
3889 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3890 dev_warn(&pdev->dev, "Not resetting device.\n");
3891 return -ENODEV;
3892 }
3893
3894
3895 if (!ctlr_is_hard_resettable(board_id))
3896 return -ENOTSUPP;
3897
3898
3899 pci_read_config_word(pdev, 4, &command_register);
3900
3901
3902
3903 pci_disable_device(pdev);
3904 pci_save_state(pdev);
3905
3906
3907 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3908 if (rc)
3909 return rc;
3910 vaddr = remap_pci_mem(paddr, 0x250);
3911 if (!vaddr)
3912 return -ENOMEM;
3913
3914
3915 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3916 &cfg_base_addr_index, &cfg_offset);
3917 if (rc)
3918 goto unmap_vaddr;
3919 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3920 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3921 if (!cfgtable) {
3922 rc = -ENOMEM;
3923 goto unmap_vaddr;
3924 }
3925 rc = write_driver_ver_to_cfgtable(cfgtable);
3926 if (rc)
3927 goto unmap_vaddr;
3928
3929
3930
3931
3932 misc_fw_support = readl(&cfgtable->misc_fw_support);
3933 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3934 if (use_doorbell) {
3935 use_doorbell = DOORBELL_CTLR_RESET2;
3936 } else {
3937 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3938 if (use_doorbell) {
3939 dev_warn(&pdev->dev, "Soft reset not supported. "
3940 "Firmware update is required.\n");
3941 rc = -ENOTSUPP;
3942 goto unmap_cfgtable;
3943 }
3944 }
3945
3946 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3947 if (rc)
3948 goto unmap_cfgtable;
3949
3950 pci_restore_state(pdev);
3951 rc = pci_enable_device(pdev);
3952 if (rc) {
3953 dev_warn(&pdev->dev, "failed to enable device.\n");
3954 goto unmap_cfgtable;
3955 }
3956 pci_write_config_word(pdev, 4, command_register);
3957
3958
3959
3960 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3961
3962
3963 dev_info(&pdev->dev, "Waiting for board to reset.\n");
3964 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
3965 if (rc) {
3966 dev_warn(&pdev->dev,
3967 "failed waiting for board to reset."
3968 " Will try soft reset.\n");
3969 rc = -ENOTSUPP;
3970 goto unmap_cfgtable;
3971 }
3972 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3973 if (rc) {
3974 dev_warn(&pdev->dev,
3975 "failed waiting for board to become ready "
3976 "after hard reset\n");
3977 goto unmap_cfgtable;
3978 }
3979
3980 rc = controller_reset_failed(vaddr);
3981 if (rc < 0)
3982 goto unmap_cfgtable;
3983 if (rc) {
3984 dev_warn(&pdev->dev, "Unable to successfully reset "
3985 "controller. Will try soft reset.\n");
3986 rc = -ENOTSUPP;
3987 } else {
3988 dev_info(&pdev->dev, "board ready after hard reset.\n");
3989 }
3990
3991unmap_cfgtable:
3992 iounmap(cfgtable);
3993
3994unmap_vaddr:
3995 iounmap(vaddr);
3996 return rc;
3997}
3998
3999
4000
4001
4002
4003
4004static void print_cfg_table(struct device *dev, struct CfgTable *tb)
4005{
4006#ifdef HPSA_DEBUG
4007 int i;
4008 char temp_name[17];
4009
4010 dev_info(dev, "Controller Configuration information\n");
4011 dev_info(dev, "------------------------------------\n");
4012 for (i = 0; i < 4; i++)
4013 temp_name[i] = readb(&(tb->Signature[i]));
4014 temp_name[4] = '\0';
4015 dev_info(dev, " Signature = %s\n", temp_name);
4016 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
4017 dev_info(dev, " Transport methods supported = 0x%x\n",
4018 readl(&(tb->TransportSupport)));
4019 dev_info(dev, " Transport methods active = 0x%x\n",
4020 readl(&(tb->TransportActive)));
4021 dev_info(dev, " Requested transport Method = 0x%x\n",
4022 readl(&(tb->HostWrite.TransportRequest)));
4023 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
4024 readl(&(tb->HostWrite.CoalIntDelay)));
4025 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
4026 readl(&(tb->HostWrite.CoalIntCount)));
4027 dev_info(dev, " Max outstanding commands = 0x%d\n",
4028 readl(&(tb->CmdsOutMax)));
4029 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
4030 for (i = 0; i < 16; i++)
4031 temp_name[i] = readb(&(tb->ServerName[i]));
4032 temp_name[16] = '\0';
4033 dev_info(dev, " Server Name = %s\n", temp_name);
4034 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
4035 readl(&(tb->HeartBeat)));
4036#endif
4037}
4038
4039static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
4040{
4041 int i, offset, mem_type, bar_type;
4042
4043 if (pci_bar_addr == PCI_BASE_ADDRESS_0)
4044 return 0;
4045 offset = 0;
4046 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4047 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
4048 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
4049 offset += 4;
4050 else {
4051 mem_type = pci_resource_flags(pdev, i) &
4052 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
4053 switch (mem_type) {
4054 case PCI_BASE_ADDRESS_MEM_TYPE_32:
4055 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
4056 offset += 4;
4057 break;
4058 case PCI_BASE_ADDRESS_MEM_TYPE_64:
4059 offset += 8;
4060 break;
4061 default:
4062 dev_warn(&pdev->dev,
4063 "base address is invalid\n");
4064 return -1;
4065 break;
4066 }
4067 }
4068 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
4069 return i + 1;
4070 }
4071 return -1;
4072}
4073
4074
4075
4076
4077
4078static void hpsa_interrupt_mode(struct ctlr_info *h)
4079{
4080#ifdef CONFIG_PCI_MSI
4081 int err, i;
4082 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
4083
4084 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
4085 hpsa_msix_entries[i].vector = 0;
4086 hpsa_msix_entries[i].entry = i;
4087 }
4088
4089
4090 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
4091 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
4092 goto default_int_mode;
4093 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
4094 dev_info(&h->pdev->dev, "MSIX\n");
4095 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
4096 MAX_REPLY_QUEUES);
4097 if (!err) {
4098 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4099 h->intr[i] = hpsa_msix_entries[i].vector;
4100 h->msix_vector = 1;
4101 return;
4102 }
4103 if (err > 0) {
4104 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
4105 "available\n", err);
4106 goto default_int_mode;
4107 } else {
4108 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
4109 err);
4110 goto default_int_mode;
4111 }
4112 }
4113 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
4114 dev_info(&h->pdev->dev, "MSI\n");
4115 if (!pci_enable_msi(h->pdev))
4116 h->msi_vector = 1;
4117 else
4118 dev_warn(&h->pdev->dev, "MSI init failed\n");
4119 }
4120default_int_mode:
4121#endif
4122
4123 h->intr[h->intr_mode] = h->pdev->irq;
4124}
4125
4126static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
4127{
4128 int i;
4129 u32 subsystem_vendor_id, subsystem_device_id;
4130
4131 subsystem_vendor_id = pdev->subsystem_vendor;
4132 subsystem_device_id = pdev->subsystem_device;
4133 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
4134 subsystem_vendor_id;
4135
4136 for (i = 0; i < ARRAY_SIZE(products); i++)
4137 if (*board_id == products[i].board_id)
4138 return i;
4139
4140 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
4141 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
4142 !hpsa_allow_any) {
4143 dev_warn(&pdev->dev, "unrecognized board ID: "
4144 "0x%08x, ignoring.\n", *board_id);
4145 return -ENODEV;
4146 }
4147 return ARRAY_SIZE(products) - 1;
4148}
4149
4150static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
4151 unsigned long *memory_bar)
4152{
4153 int i;
4154
4155 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
4156 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
4157
4158 *memory_bar = pci_resource_start(pdev, i);
4159 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
4160 *memory_bar);
4161 return 0;
4162 }
4163 dev_warn(&pdev->dev, "no memory BAR found\n");
4164 return -ENODEV;
4165}
4166
4167static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
4168 int wait_for_ready)
4169{
4170 int i, iterations;
4171 u32 scratchpad;
4172 if (wait_for_ready)
4173 iterations = HPSA_BOARD_READY_ITERATIONS;
4174 else
4175 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
4176
4177 for (i = 0; i < iterations; i++) {
4178 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
4179 if (wait_for_ready) {
4180 if (scratchpad == HPSA_FIRMWARE_READY)
4181 return 0;
4182 } else {
4183 if (scratchpad != HPSA_FIRMWARE_READY)
4184 return 0;
4185 }
4186 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
4187 }
4188 dev_warn(&pdev->dev, "board not ready, timed out.\n");
4189 return -ENODEV;
4190}
4191
4192static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
4193 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
4194 u64 *cfg_offset)
4195{
4196 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
4197 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
4198 *cfg_base_addr &= (u32) 0x0000ffff;
4199 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
4200 if (*cfg_base_addr_index == -1) {
4201 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
4202 return -ENODEV;
4203 }
4204 return 0;
4205}
4206
4207static int hpsa_find_cfgtables(struct ctlr_info *h)
4208{
4209 u64 cfg_offset;
4210 u32 cfg_base_addr;
4211 u64 cfg_base_addr_index;
4212 u32 trans_offset;
4213 int rc;
4214
4215 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
4216 &cfg_base_addr_index, &cfg_offset);
4217 if (rc)
4218 return rc;
4219 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
4220 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
4221 if (!h->cfgtable)
4222 return -ENOMEM;
4223 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4224 if (rc)
4225 return rc;
4226
4227 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4228 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
4229 cfg_base_addr_index)+cfg_offset+trans_offset,
4230 sizeof(*h->transtable));
4231 if (!h->transtable)
4232 return -ENOMEM;
4233 return 0;
4234}
4235
4236static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
4237{
4238 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4239
4240
4241 if (reset_devices && h->max_commands > 32)
4242 h->max_commands = 32;
4243
4244 if (h->max_commands < 16) {
4245 dev_warn(&h->pdev->dev, "Controller reports "
4246 "max supported commands of %d, an obvious lie. "
4247 "Using 16. Ensure that firmware is up to date.\n",
4248 h->max_commands);
4249 h->max_commands = 16;
4250 }
4251}
4252
4253
4254
4255
4256
4257static void hpsa_find_board_params(struct ctlr_info *h)
4258{
4259 hpsa_get_max_perf_mode_cmds(h);
4260 h->nr_cmds = h->max_commands - 4;
4261 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
4262
4263
4264
4265
4266 h->max_cmd_sg_entries = 31;
4267 if (h->maxsgentries > 512) {
4268 h->max_cmd_sg_entries = 32;
4269 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
4270 h->maxsgentries--;
4271 } else {
4272 h->maxsgentries = 31;
4273 h->chainsize = 0;
4274 }
4275
4276
4277 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
4278}
4279
4280static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
4281{
4282 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
4283 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
4284 return false;
4285 }
4286 return true;
4287}
4288
4289
4290static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
4291{
4292#ifdef CONFIG_X86
4293 u32 prefetch;
4294
4295 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
4296 prefetch |= 0x100;
4297 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
4298#endif
4299}
4300
4301
4302
4303
4304static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
4305{
4306 u32 dma_prefetch;
4307
4308 if (h->board_id != 0x3225103C)
4309 return;
4310 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
4311 dma_prefetch |= 0x8000;
4312 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
4313}
4314
4315static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
4316{
4317 int i;
4318 u32 doorbell_value;
4319 unsigned long flags;
4320
4321
4322
4323
4324
4325 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
4326 spin_lock_irqsave(&h->lock, flags);
4327 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
4328 spin_unlock_irqrestore(&h->lock, flags);
4329 if (!(doorbell_value & CFGTBL_ChangeReq))
4330 break;
4331
4332 usleep_range(10000, 20000);
4333 }
4334}
4335
4336static int hpsa_enter_simple_mode(struct ctlr_info *h)
4337{
4338 u32 trans_support;
4339
4340 trans_support = readl(&(h->cfgtable->TransportSupport));
4341 if (!(trans_support & SIMPLE_MODE))
4342 return -ENOTSUPP;
4343
4344 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
4345
4346 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
4347 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4348 hpsa_wait_for_mode_change_ack(h);
4349 print_cfg_table(&h->pdev->dev, h->cfgtable);
4350 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
4351 dev_warn(&h->pdev->dev,
4352 "unable to get board into simple mode\n");
4353 return -ENODEV;
4354 }
4355 h->transMethod = CFGTBL_Trans_Simple;
4356 return 0;
4357}
4358
4359static int hpsa_pci_init(struct ctlr_info *h)
4360{
4361 int prod_index, err;
4362
4363 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
4364 if (prod_index < 0)
4365 return -ENODEV;
4366 h->product_name = products[prod_index].product_name;
4367 h->access = *(products[prod_index].access);
4368
4369 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
4370 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
4371
4372 err = pci_enable_device(h->pdev);
4373 if (err) {
4374 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
4375 return err;
4376 }
4377
4378
4379 pci_set_master(h->pdev);
4380
4381 err = pci_request_regions(h->pdev, HPSA);
4382 if (err) {
4383 dev_err(&h->pdev->dev,
4384 "cannot obtain PCI resources, aborting\n");
4385 return err;
4386 }
4387 hpsa_interrupt_mode(h);
4388 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
4389 if (err)
4390 goto err_out_free_res;
4391 h->vaddr = remap_pci_mem(h->paddr, 0x250);
4392 if (!h->vaddr) {
4393 err = -ENOMEM;
4394 goto err_out_free_res;
4395 }
4396 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
4397 if (err)
4398 goto err_out_free_res;
4399 err = hpsa_find_cfgtables(h);
4400 if (err)
4401 goto err_out_free_res;
4402 hpsa_find_board_params(h);
4403
4404 if (!hpsa_CISS_signature_present(h)) {
4405 err = -ENODEV;
4406 goto err_out_free_res;
4407 }
4408 hpsa_enable_scsi_prefetch(h);
4409 hpsa_p600_dma_prefetch_quirk(h);
4410 err = hpsa_enter_simple_mode(h);
4411 if (err)
4412 goto err_out_free_res;
4413 return 0;
4414
4415err_out_free_res:
4416 if (h->transtable)
4417 iounmap(h->transtable);
4418 if (h->cfgtable)
4419 iounmap(h->cfgtable);
4420 if (h->vaddr)
4421 iounmap(h->vaddr);
4422 pci_disable_device(h->pdev);
4423 pci_release_regions(h->pdev);
4424 return err;
4425}
4426
4427static void hpsa_hba_inquiry(struct ctlr_info *h)
4428{
4429 int rc;
4430
4431#define HBA_INQUIRY_BYTE_COUNT 64
4432 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
4433 if (!h->hba_inquiry_data)
4434 return;
4435 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
4436 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
4437 if (rc != 0) {
4438 kfree(h->hba_inquiry_data);
4439 h->hba_inquiry_data = NULL;
4440 }
4441}
4442
4443static int hpsa_init_reset_devices(struct pci_dev *pdev)
4444{
4445 int rc, i;
4446
4447 if (!reset_devices)
4448 return 0;
4449
4450
4451 rc = hpsa_kdump_hard_reset_controller(pdev);
4452
4453
4454
4455
4456
4457
4458 if (rc == -ENOTSUPP)
4459 return rc;
4460 if (rc)
4461 return -ENODEV;
4462
4463
4464 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4465 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
4466 if (hpsa_noop(pdev) == 0)
4467 break;
4468 else
4469 dev_warn(&pdev->dev, "no-op failed%s\n",
4470 (i < 11 ? "; re-trying" : ""));
4471 }
4472 return 0;
4473}
4474
4475static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
4476{
4477 h->cmd_pool_bits = kzalloc(
4478 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4479 sizeof(unsigned long), GFP_KERNEL);
4480 h->cmd_pool = pci_alloc_consistent(h->pdev,
4481 h->nr_cmds * sizeof(*h->cmd_pool),
4482 &(h->cmd_pool_dhandle));
4483 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4484 h->nr_cmds * sizeof(*h->errinfo_pool),
4485 &(h->errinfo_pool_dhandle));
4486 if ((h->cmd_pool_bits == NULL)
4487 || (h->cmd_pool == NULL)
4488 || (h->errinfo_pool == NULL)) {
4489 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
4490 return -ENOMEM;
4491 }
4492 return 0;
4493}
4494
4495static void hpsa_free_cmd_pool(struct ctlr_info *h)
4496{
4497 kfree(h->cmd_pool_bits);
4498 if (h->cmd_pool)
4499 pci_free_consistent(h->pdev,
4500 h->nr_cmds * sizeof(struct CommandList),
4501 h->cmd_pool, h->cmd_pool_dhandle);
4502 if (h->errinfo_pool)
4503 pci_free_consistent(h->pdev,
4504 h->nr_cmds * sizeof(struct ErrorInfo),
4505 h->errinfo_pool,
4506 h->errinfo_pool_dhandle);
4507}
4508
4509static int hpsa_request_irq(struct ctlr_info *h,
4510 irqreturn_t (*msixhandler)(int, void *),
4511 irqreturn_t (*intxhandler)(int, void *))
4512{
4513 int rc, i;
4514
4515
4516
4517
4518
4519 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4520 h->q[i] = (u8) i;
4521
4522 if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
4523
4524 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4525 rc = request_irq(h->intr[i], msixhandler,
4526 0, h->devname,
4527 &h->q[i]);
4528 } else {
4529
4530 if (h->msix_vector || h->msi_vector) {
4531 rc = request_irq(h->intr[h->intr_mode],
4532 msixhandler, 0, h->devname,
4533 &h->q[h->intr_mode]);
4534 } else {
4535 rc = request_irq(h->intr[h->intr_mode],
4536 intxhandler, IRQF_SHARED, h->devname,
4537 &h->q[h->intr_mode]);
4538 }
4539 }
4540 if (rc) {
4541 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4542 h->intr[h->intr_mode], h->devname);
4543 return -ENODEV;
4544 }
4545 return 0;
4546}
4547
4548static int hpsa_kdump_soft_reset(struct ctlr_info *h)
4549{
4550 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4551 HPSA_RESET_TYPE_CONTROLLER)) {
4552 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4553 return -EIO;
4554 }
4555
4556 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4557 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4558 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4559 return -1;
4560 }
4561
4562 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4563 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4564 dev_warn(&h->pdev->dev, "Board failed to become ready "
4565 "after soft reset.\n");
4566 return -1;
4567 }
4568
4569 return 0;
4570}
4571
4572static void free_irqs(struct ctlr_info *h)
4573{
4574 int i;
4575
4576 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
4577
4578 i = h->intr_mode;
4579 free_irq(h->intr[i], &h->q[i]);
4580 return;
4581 }
4582
4583 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4584 free_irq(h->intr[i], &h->q[i]);
4585}
4586
4587static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
4588{
4589 free_irqs(h);
4590#ifdef CONFIG_PCI_MSI
4591 if (h->msix_vector) {
4592 if (h->pdev->msix_enabled)
4593 pci_disable_msix(h->pdev);
4594 } else if (h->msi_vector) {
4595 if (h->pdev->msi_enabled)
4596 pci_disable_msi(h->pdev);
4597 }
4598#endif
4599}
4600
4601static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4602{
4603 hpsa_free_irqs_and_disable_msix(h);
4604 hpsa_free_sg_chain_blocks(h);
4605 hpsa_free_cmd_pool(h);
4606 kfree(h->blockFetchTable);
4607 pci_free_consistent(h->pdev, h->reply_pool_size,
4608 h->reply_pool, h->reply_pool_dhandle);
4609 if (h->vaddr)
4610 iounmap(h->vaddr);
4611 if (h->transtable)
4612 iounmap(h->transtable);
4613 if (h->cfgtable)
4614 iounmap(h->cfgtable);
4615 pci_release_regions(h->pdev);
4616 kfree(h);
4617}
4618
4619static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
4620{
4621 assert_spin_locked(&lockup_detector_lock);
4622 if (!hpsa_lockup_detector)
4623 return;
4624 if (h->lockup_detected)
4625 return;
4626 list_del(&h->lockup_list);
4627}
4628
4629
4630static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
4631{
4632 struct CommandList *c = NULL;
4633
4634 assert_spin_locked(&h->lock);
4635
4636 while (!list_empty(list)) {
4637 c = list_entry(list->next, struct CommandList, list);
4638 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4639 finish_cmd(c);
4640 }
4641}
4642
4643static void controller_lockup_detected(struct ctlr_info *h)
4644{
4645 unsigned long flags;
4646
4647 assert_spin_locked(&lockup_detector_lock);
4648 remove_ctlr_from_lockup_detector_list(h);
4649 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4650 spin_lock_irqsave(&h->lock, flags);
4651 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
4652 spin_unlock_irqrestore(&h->lock, flags);
4653 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
4654 h->lockup_detected);
4655 pci_disable_device(h->pdev);
4656 spin_lock_irqsave(&h->lock, flags);
4657 fail_all_cmds_on_list(h, &h->cmpQ);
4658 fail_all_cmds_on_list(h, &h->reqQ);
4659 spin_unlock_irqrestore(&h->lock, flags);
4660}
4661
4662static void detect_controller_lockup(struct ctlr_info *h)
4663{
4664 u64 now;
4665 u32 heartbeat;
4666 unsigned long flags;
4667
4668 assert_spin_locked(&lockup_detector_lock);
4669 now = get_jiffies_64();
4670
4671 if (time_after64(h->last_intr_timestamp +
4672 (h->heartbeat_sample_interval), now))
4673 return;
4674
4675
4676
4677
4678
4679
4680 if (time_after64(h->last_heartbeat_timestamp +
4681 (h->heartbeat_sample_interval), now))
4682 return;
4683
4684
4685 spin_lock_irqsave(&h->lock, flags);
4686 heartbeat = readl(&h->cfgtable->HeartBeat);
4687 spin_unlock_irqrestore(&h->lock, flags);
4688 if (h->last_heartbeat == heartbeat) {
4689 controller_lockup_detected(h);
4690 return;
4691 }
4692
4693
4694 h->last_heartbeat = heartbeat;
4695 h->last_heartbeat_timestamp = now;
4696}
4697
4698static int detect_controller_lockup_thread(void *notused)
4699{
4700 struct ctlr_info *h;
4701 unsigned long flags;
4702
4703 while (1) {
4704 struct list_head *this, *tmp;
4705
4706 schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
4707 if (kthread_should_stop())
4708 break;
4709 spin_lock_irqsave(&lockup_detector_lock, flags);
4710 list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
4711 h = list_entry(this, struct ctlr_info, lockup_list);
4712 detect_controller_lockup(h);
4713 }
4714 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4715 }
4716 return 0;
4717}
4718
4719static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
4720{
4721 unsigned long flags;
4722
4723 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
4724 spin_lock_irqsave(&lockup_detector_lock, flags);
4725 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
4726 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4727}
4728
4729static void start_controller_lockup_detector(struct ctlr_info *h)
4730{
4731
4732 if (!hpsa_lockup_detector) {
4733 spin_lock_init(&lockup_detector_lock);
4734 hpsa_lockup_detector =
4735 kthread_run(detect_controller_lockup_thread,
4736 NULL, HPSA);
4737 }
4738 if (!hpsa_lockup_detector) {
4739 dev_warn(&h->pdev->dev,
4740 "Could not start lockup detector thread\n");
4741 return;
4742 }
4743 add_ctlr_to_lockup_detector_list(h);
4744}
4745
4746static void stop_controller_lockup_detector(struct ctlr_info *h)
4747{
4748 unsigned long flags;
4749
4750 spin_lock_irqsave(&lockup_detector_lock, flags);
4751 remove_ctlr_from_lockup_detector_list(h);
4752
4753 if (list_empty(&hpsa_ctlr_list)) {
4754 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4755 kthread_stop(hpsa_lockup_detector);
4756 spin_lock_irqsave(&lockup_detector_lock, flags);
4757 hpsa_lockup_detector = NULL;
4758 }
4759 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4760}
4761
4762static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4763{
4764 int dac, rc;
4765 struct ctlr_info *h;
4766 int try_soft_reset = 0;
4767 unsigned long flags;
4768
4769 if (number_of_controllers == 0)
4770 printk(KERN_INFO DRIVER_NAME "\n");
4771
4772 rc = hpsa_init_reset_devices(pdev);
4773 if (rc) {
4774 if (rc != -ENOTSUPP)
4775 return rc;
4776
4777
4778
4779
4780
4781 try_soft_reset = 1;
4782 rc = 0;
4783 }
4784
4785reinit_after_soft_reset:
4786
4787
4788
4789
4790
4791#define COMMANDLIST_ALIGNMENT 32
4792 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
4793 h = kzalloc(sizeof(*h), GFP_KERNEL);
4794 if (!h)
4795 return -ENOMEM;
4796
4797 h->pdev = pdev;
4798 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
4799 INIT_LIST_HEAD(&h->cmpQ);
4800 INIT_LIST_HEAD(&h->reqQ);
4801 spin_lock_init(&h->lock);
4802 spin_lock_init(&h->scan_lock);
4803 rc = hpsa_pci_init(h);
4804 if (rc != 0)
4805 goto clean1;
4806
4807 sprintf(h->devname, HPSA "%d", number_of_controllers);
4808 h->ctlr = number_of_controllers;
4809 number_of_controllers++;
4810
4811
4812 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4813 if (rc == 0) {
4814 dac = 1;
4815 } else {
4816 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4817 if (rc == 0) {
4818 dac = 0;
4819 } else {
4820 dev_err(&pdev->dev, "no suitable DMA available\n");
4821 goto clean1;
4822 }
4823 }
4824
4825
4826 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4827
4828 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
4829 goto clean2;
4830 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
4831 h->devname, pdev->device,
4832 h->intr[h->intr_mode], dac ? "" : " not");
4833 if (hpsa_allocate_cmd_pool(h))
4834 goto clean4;
4835 if (hpsa_allocate_sg_chain_blocks(h))
4836 goto clean4;
4837 init_waitqueue_head(&h->scan_wait_queue);
4838 h->scan_finished = 1;
4839
4840 pci_set_drvdata(pdev, h);
4841 h->ndevices = 0;
4842 h->scsi_host = NULL;
4843 spin_lock_init(&h->devlock);
4844 hpsa_put_ctlr_into_performant_mode(h);
4845
4846
4847
4848
4849
4850 if (try_soft_reset) {
4851
4852
4853
4854
4855
4856
4857
4858
4859 spin_lock_irqsave(&h->lock, flags);
4860 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4861 spin_unlock_irqrestore(&h->lock, flags);
4862 free_irqs(h);
4863 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4864 hpsa_intx_discard_completions);
4865 if (rc) {
4866 dev_warn(&h->pdev->dev, "Failed to request_irq after "
4867 "soft reset.\n");
4868 goto clean4;
4869 }
4870
4871 rc = hpsa_kdump_soft_reset(h);
4872 if (rc)
4873
4874 goto clean4;
4875
4876 dev_info(&h->pdev->dev, "Board READY.\n");
4877 dev_info(&h->pdev->dev,
4878 "Waiting for stale completions to drain.\n");
4879 h->access.set_intr_mask(h, HPSA_INTR_ON);
4880 msleep(10000);
4881 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4882
4883 rc = controller_reset_failed(h->cfgtable);
4884 if (rc)
4885 dev_info(&h->pdev->dev,
4886 "Soft reset appears to have failed.\n");
4887
4888
4889
4890
4891
4892 hpsa_undo_allocations_after_kdump_soft_reset(h);
4893 try_soft_reset = 0;
4894 if (rc)
4895
4896 return -ENODEV;
4897
4898 goto reinit_after_soft_reset;
4899 }
4900
4901
4902 h->access.set_intr_mask(h, HPSA_INTR_ON);
4903
4904 hpsa_hba_inquiry(h);
4905 hpsa_register_scsi(h);
4906 start_controller_lockup_detector(h);
4907 return 1;
4908
4909clean4:
4910 hpsa_free_sg_chain_blocks(h);
4911 hpsa_free_cmd_pool(h);
4912 free_irqs(h);
4913clean2:
4914clean1:
4915 kfree(h);
4916 return rc;
4917}
4918
4919static void hpsa_flush_cache(struct ctlr_info *h)
4920{
4921 char *flush_buf;
4922 struct CommandList *c;
4923
4924 flush_buf = kzalloc(4, GFP_KERNEL);
4925 if (!flush_buf)
4926 return;
4927
4928 c = cmd_special_alloc(h);
4929 if (!c) {
4930 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4931 goto out_of_memory;
4932 }
4933 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
4934 RAID_CTLR_LUNID, TYPE_CMD)) {
4935 goto out;
4936 }
4937 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
4938 if (c->err_info->CommandStatus != 0)
4939out:
4940 dev_warn(&h->pdev->dev,
4941 "error flushing cache on controller\n");
4942 cmd_special_free(h, c);
4943out_of_memory:
4944 kfree(flush_buf);
4945}
4946
4947static void hpsa_shutdown(struct pci_dev *pdev)
4948{
4949 struct ctlr_info *h;
4950
4951 h = pci_get_drvdata(pdev);
4952
4953
4954
4955
4956 hpsa_flush_cache(h);
4957 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4958 hpsa_free_irqs_and_disable_msix(h);
4959}
4960
4961static void hpsa_free_device_info(struct ctlr_info *h)
4962{
4963 int i;
4964
4965 for (i = 0; i < h->ndevices; i++)
4966 kfree(h->dev[i]);
4967}
4968
4969static void hpsa_remove_one(struct pci_dev *pdev)
4970{
4971 struct ctlr_info *h;
4972
4973 if (pci_get_drvdata(pdev) == NULL) {
4974 dev_err(&pdev->dev, "unable to remove device\n");
4975 return;
4976 }
4977 h = pci_get_drvdata(pdev);
4978 stop_controller_lockup_detector(h);
4979 hpsa_unregister_scsi(h);
4980 hpsa_shutdown(pdev);
4981 iounmap(h->vaddr);
4982 iounmap(h->transtable);
4983 iounmap(h->cfgtable);
4984 hpsa_free_device_info(h);
4985 hpsa_free_sg_chain_blocks(h);
4986 pci_free_consistent(h->pdev,
4987 h->nr_cmds * sizeof(struct CommandList),
4988 h->cmd_pool, h->cmd_pool_dhandle);
4989 pci_free_consistent(h->pdev,
4990 h->nr_cmds * sizeof(struct ErrorInfo),
4991 h->errinfo_pool, h->errinfo_pool_dhandle);
4992 pci_free_consistent(h->pdev, h->reply_pool_size,
4993 h->reply_pool, h->reply_pool_dhandle);
4994 kfree(h->cmd_pool_bits);
4995 kfree(h->blockFetchTable);
4996 kfree(h->hba_inquiry_data);
4997 pci_disable_device(pdev);
4998 pci_release_regions(pdev);
4999 pci_set_drvdata(pdev, NULL);
5000 kfree(h);
5001}
5002
5003static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
5004 __attribute__((unused)) pm_message_t state)
5005{
5006 return -ENOSYS;
5007}
5008
5009static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
5010{
5011 return -ENOSYS;
5012}
5013
5014static struct pci_driver hpsa_pci_driver = {
5015 .name = HPSA,
5016 .probe = hpsa_init_one,
5017 .remove = hpsa_remove_one,
5018 .id_table = hpsa_pci_device_id,
5019 .shutdown = hpsa_shutdown,
5020 .suspend = hpsa_suspend,
5021 .resume = hpsa_resume,
5022};
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036static void calc_bucket_map(int bucket[], int num_buckets,
5037 int nsgs, int *bucket_map)
5038{
5039 int i, j, b, size;
5040
5041
5042#define MINIMUM_TRANSFER_BLOCKS 4
5043#define NUM_BUCKETS 8
5044
5045 for (i = 0; i <= nsgs; i++) {
5046
5047 size = i + MINIMUM_TRANSFER_BLOCKS;
5048 b = num_buckets;
5049
5050 for (j = 0; j < 8; j++) {
5051 if (bucket[j] >= size) {
5052 b = j;
5053 break;
5054 }
5055 }
5056
5057 bucket_map[i] = b;
5058 }
5059}
5060
5061static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
5062{
5063 int i;
5064 unsigned long register_value;
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
5084 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
5085
5086
5087
5088
5089
5090
5091
5092 memset(h->reply_pool, 0, h->reply_pool_size);
5093
5094 bft[7] = SG_ENTRIES_IN_CMD + 4;
5095 calc_bucket_map(bft, ARRAY_SIZE(bft),
5096 SG_ENTRIES_IN_CMD, h->blockFetchTable);
5097 for (i = 0; i < 8; i++)
5098 writel(bft[i], &h->transtable->BlockFetch[i]);
5099
5100
5101 writel(h->max_commands, &h->transtable->RepQSize);
5102 writel(h->nreply_queues, &h->transtable->RepQCount);
5103 writel(0, &h->transtable->RepQCtrAddrLow32);
5104 writel(0, &h->transtable->RepQCtrAddrHigh32);
5105
5106 for (i = 0; i < h->nreply_queues; i++) {
5107 writel(0, &h->transtable->RepQAddr[i].upper);
5108 writel(h->reply_pool_dhandle +
5109 (h->max_commands * sizeof(u64) * i),
5110 &h->transtable->RepQAddr[i].lower);
5111 }
5112
5113 writel(CFGTBL_Trans_Performant | use_short_tags |
5114 CFGTBL_Trans_enable_directed_msix,
5115 &(h->cfgtable->HostWrite.TransportRequest));
5116 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
5117 hpsa_wait_for_mode_change_ack(h);
5118 register_value = readl(&(h->cfgtable->TransportActive));
5119 if (!(register_value & CFGTBL_Trans_Performant)) {
5120 dev_warn(&h->pdev->dev, "unable to get board into"
5121 " performant mode\n");
5122 return;
5123 }
5124
5125 h->access = SA5_performant_access;
5126 h->transMethod = CFGTBL_Trans_Performant;
5127}
5128
5129static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
5130{
5131 u32 trans_support;
5132 int i;
5133
5134 if (hpsa_simple_mode)
5135 return;
5136
5137 trans_support = readl(&(h->cfgtable->TransportSupport));
5138 if (!(trans_support & PERFORMANT_MODE))
5139 return;
5140
5141 h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
5142 hpsa_get_max_perf_mode_cmds(h);
5143
5144 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
5145 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
5146 &(h->reply_pool_dhandle));
5147
5148 for (i = 0; i < h->nreply_queues; i++) {
5149 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
5150 h->reply_queue[i].size = h->max_commands;
5151 h->reply_queue[i].wraparound = 1;
5152 h->reply_queue[i].current_entry = 0;
5153 }
5154
5155
5156 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
5157 sizeof(u32)), GFP_KERNEL);
5158
5159 if ((h->reply_pool == NULL)
5160 || (h->blockFetchTable == NULL))
5161 goto clean_up;
5162
5163 hpsa_enter_performant_mode(h,
5164 trans_support & CFGTBL_Trans_use_short_tags);
5165
5166 return;
5167
5168clean_up:
5169 if (h->reply_pool)
5170 pci_free_consistent(h->pdev, h->reply_pool_size,
5171 h->reply_pool, h->reply_pool_dhandle);
5172 kfree(h->blockFetchTable);
5173}
5174
5175
5176
5177
5178
5179static int __init hpsa_init(void)
5180{
5181 return pci_register_driver(&hpsa_pci_driver);
5182}
5183
5184static void __exit hpsa_cleanup(void)
5185{
5186 pci_unregister_driver(&hpsa_pci_driver);
5187}
5188
5189module_init(hpsa_init);
5190module_exit(hpsa_cleanup);
5191