/*
 *    Disk Array driver for HP Smart Array SAS controllers
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

#define HPSA_DRIVER_VERSION "3.4.20-200"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* Polling intervals and limits, in milliseconds, for controller state changes */
#define CLEAR_EVENT_WAIT_INTERVAL 20
#define MODE_CHANGE_WAIT_INTERVAL 10
#define MAX_CLEAR_EVENT_WAIT 30000
#define MAX_MODE_CHANGE_WAIT 2000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* How many times to retry a command */
#define MAX_CMD_RETRIES 3

/* Error-handling timeout for commands to external (pass-through RAID) devices */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
		      void __user *arg);
static int hpsa_passthru_ioctl(struct ctlr_info *h,
			       IOCTL_Command_struct *iocommand);
static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
				   BIG_IOCTL_Command_struct *ioc);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}
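/* Extract sense key/asc/ascq from sense data; -1 means invalid. */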
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * MSA2xxx HBA's.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
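/* Disable ioaccel (HP SSD Smart Path) for a device, now and across rescans. */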
static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/*
	 * Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
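/* Bits 7:6 of byte 3 of the LUN address are 01b for logical volumes. */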
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2
#define HPSA_RAID_5	3
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5
#define HPSA_RAID_ADM	6
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}
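/* Show the physical paths (port/box/bay) known for a device, one per line. */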
#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}

static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}

static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};
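/*
 * Commands held back from the SCSI mid layer for driver-internal use
 * and for concurrent passthru ioctls.
 */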
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 2048,
	.no_write_same		= 1,
};
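/*
 * Pull the next completed command off a reply queue.  In performant mode
 * the queue is a ring whose producer toggles the low "wraparound" bit on
 * each pass, so an entry whose toggle bit matches rq->wraparound is new;
 * otherwise the queue is empty.
 */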
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-4)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
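
/*
 * During firmware flash, the heartbeat register may not update as
 * frequently as it should.  So we dial down lockup detection during
 * firmware flash, and dial it back up when firmware flash completes.
 */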
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	/*
	 * A retried command has already been counted against the device.
	 */
	if (c->device && !c->retry_pending)
		atomic_inc(&c->device->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/*
	 * If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the device address indicates the lun.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/*
	 * This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}

/*
 * Called during a scan operation.
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk.
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before. If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], and we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
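/* Compare two 8-byte SCSI3 addresses byte by byte. */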
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device needs to be torn down and replaced.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices. An active
	 * path change causes the ioaccel handle to change.
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states. */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a missing physical drive.
		 */
		if (!logical_drive->phys_disk[i]) {
			dev_warn(&h->pdev->dev,
				"%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
				__func__,
				h->scsi_host->host_no, logical_drive->bus,
				logical_drive->target, logical_drive->lun);
			hpsa_turn_off_ioaccel_for_device(logical_drive);
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * qdepth accumulated the component physical disks' queue
		 * depths above, capped at h->nr_cmds.
		 */
		logical_drive->queue_depth = qdepth;
	else {
		if (logical_drive->external)
			logical_drive->queue_depth = EXTERNAL_QD;
		else
			logical_drive->queue_depth = h->nr_cmds;
	}
}

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on a ioaccel volume processing I/O requests.
		 *
		 * If an ioaccel volume status changed, initially because it was
		 * re-configured and thus underwent a transformation, or
		 * a drive failed, we would have received a state change
		 * request and ioaccel should have been turned off. When the
		 * transformation completes, we get another state change
		 * request to turn ioaccel back on. In this case, we need
		 * to update the ioaccel information.
		 *
		 * Thus: If it is not currently enabled, but will be after
		 * the scan completes, make sure the ioaccel pointers
		 * are up to date.
		 */
		if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
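/*
 * Count the commands in the command pool that are still outstanding
 * against the given device.
 */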
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}
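/*
 * Poll once per second, up to num_wait polls, for a device's outstanding
 * commands to drain before it is removed.
 */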
#define NUM_WAIT 20
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;
	int num_wait = NUM_WAIT;

	if (device->external)
		num_wait = HPSA_EH_PTRAID_TIMEOUT;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > num_wait)
			break;
		msleep(1000);
	}

	if (waits > num_wait) {
		dev_warn(&h->pdev->dev,
			"%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
			__func__,
			h->scsi_host->host_no,
			device->bus, device->target, device->lun, cmds);
	}
}

static void hpsa_remove_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	/*
	 * Allow for commands to drain
	 */
	device->removed = 1;
	hpsa_wait_for_outstanding_commands_for_dev(h, device);

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */
		hpsa_remove_sas_device(device);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
	removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i])
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/*
	 * Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 *
	 * The raid map should be current by now.
	 *
	 * We are updating the device list used for I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i])
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
/*
 * Find the device in h->dev[] with the given bus/target/lun address.
 * Caller is expected to hold h->devlock.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

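/*
 * Midlayer slave_alloc hook: find the matching hpsa_scsi_dev_t (by SAS
 * rphy for physical devices, by bus/target/lun otherwise) and stash it
 * in sdev->hostdata for fast lookup on the I/O path.
 */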
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd = NULL;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	}
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
#define CTLR_TIMEOUT (120 * HZ)
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd) {
		sd->was_removed = 0;
		queue_depth = sd->queue_depth != 0 ?
				sd->queue_depth : sdev->host->can_queue;
		if (sd->external) {
			queue_depth = EXTERNAL_QD;
			sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
			blk_queue_rq_timeout(sdev->request_queue,
						HPSA_EH_PTRAID_TIMEOUT);
		}
		if (is_hba_lunid(sd->scsi3addr)) {
			sdev->eh_timeout = CTLR_TIMEOUT;
			blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
		}
	} else {
		queue_depth = sdev->host->can_queue;
	}

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

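/*
 * Midlayer teardown hook: flag the underlying device so that late
 * completions can be resolved without touching the freed scsi_cmnd.
 */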
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *hdev = NULL;

	hdev = sdev->hostdata;

	if (hdev)
		hdev->was_removed = 1;
}

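/* Free the per-command ioaccel2 SG chain blocks and the pointer array. */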
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

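/*
 * Allocate one ioaccel2 SG chain block per command, sized for the
 * controller's maximum SG entry count.  These are used when a request's
 * SG list overflows the entries embedded in the ioaccel2 command itself.
 */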
static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
			GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc_array(h->maxsgentries,
				sizeof(*h->ioaccel2_cmd_sg_list[i]),
				GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;

	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
						sizeof(*h->cmd_sg_list[i]),
						GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

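/*
 * DMA-map a command's ioaccel2 SG chain block and plug its bus address
 * into the first SG element.  Returns -1 if the mapping fails.
 */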
static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
		struct CommandList *c,
		struct scsi_cmnd *cmd,
		struct io_accel2_cmd *c2,
		struct hpsa_scsi_dev_t *dev)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			if (cmd)
				cmd->result = 0;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);	/* host byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA disk disappear? We will eventually
			 * get a state change event from the controller, but
			 * in the meantime we need to tell the OS that the
			 * HBA disk is no longer there and trigger a rescan
			 * of hpsa so that we re-device-discover.
			 */
			if (dev->physical_device && dev->expose_device) {
				cmd->result = DID_NO_CONNECT << 16;
				dev->removed = 1;
				h->drv_req_rescan = 1;
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
			} else
				/*
				 * Retry by sending down the RAID path.
				 * We will get an event from ctlr to
				 * trigger rescan regardless.
				 */
				retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	if (dev->in_reset)
		retry = 0;

	return retry;	/* retry on raid path? */
}

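/*
 * Mark a command idle and, if a reset is waiting on this device, wake
 * the reset handler once the outstanding count drains to zero.
 */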
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	struct hpsa_scsi_dev_t *dev = c->device;

	/*
	 * Reset c->scsi_cmd here so that the reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (dev) {
		atomic_dec(&dev->commands_outstanding);
		if (dev->in_reset &&
			atomic_read(&dev->commands_outstanding) <= 0)
			wake_up_all(&h->event_sync_wait_queue);
	}
}

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
		struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	if (cmd && cmd->scsi_done)
		cmd->scsi_done(cmd);
}

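/* Resubmit a command down the RAID path via the resubmit workqueue. */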
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd->result = 0;
		return hpsa_cmd_free_and_done(h, c, cmd);
	}

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever is
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			hpsa_turn_off_ioaccel_for_device(dev);
		}

		if (dev->in_reset) {
			cmd->result = DID_RESET << 16;
			return hpsa_cmd_free_and_done(h, c, cmd);
		}

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
		struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
			tmf_status);
		break;
	}
	return -tmf_status;
}

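/*
 * Completion handler for SCSI/ioaccel commands: unmap DMA, decode the
 * controller's error information into a SAM/host status for the midlayer,
 * and either finish the command or retry it down the RAID path.
 */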
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;

	if (!cmd->device) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);	/* host byte */

	/* SCSI command has already been cleaned up in SML */
	if (dev->was_removed) {
		hpsa_cmd_resolve_and_free(h, cp);
		return;
	}

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
		if (dev->physical_device && dev->expose_device &&
			dev->removed) {
			cmd->result = DID_NO_CONNECT << 16;
			return hpsa_cmd_free_and_done(h, cp, cmd);
		}
		if (likely(cp->phys_disk != NULL))
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	}

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			switch (sense_key) {
			case ABORTED_COMMAND:
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			case UNIT_ATTENTION:
				if (asc == 0x3F && ascq == 0x0E)
					h->drv_req_rescan = 1;
				break;
			case ILLEGAL_REQUEST:
				if (asc == 0x25 && ascq == 0x00) {
					dev->removed = 1;
					cmd->result = DID_NO_CONNECT << 16;
				}
				break;
			}
			break;
		}
		/* Problem was not a check condition.
		 * Pass it up to the upper layers.
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* SCSI status is unexpectedly zero */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target.
		 */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}

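/* Unmap the first sg_used simple SG entries of a command. */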
static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
		int sg_used, enum dma_data_direction data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

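/*
 * Map a single contiguous buffer into the command's first SG entry,
 * or set an empty SG list for commands that move no data.
 */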
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		enum dma_data_direction data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
	int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

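/* Read this CPU's copy of the per-cpu lockup-detected flag. */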
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, enum dma_data_direction data_direction,
	unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}

static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		txt, lun, cdb);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

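/* Issue RECEIVE DIAGNOSTIC for the given page into buf. */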
static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
		u8 page, u8 *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
						u8 *scsi3addr)
{
	u8 *buf;
	u64 sa = 0;
	int rc = 0;

	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return 0;

	rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
					buf, 1024);

	if (rc)
		goto out;

	sa = get_unaligned_be64(buf+12);

out:
	kfree(buf);
	return sa;
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
		u16 page, unsigned char *buf,
		unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	c->device = dev;

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

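/*
 * Decide whether an in-flight command is addressed to the given device,
 * checking the CISS LUN for RAID-path commands and the physical disk
 * (or TMF IT nexus) for ioaccel commands.
 */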
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

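/*
 * Send a reset to the device, wait for outstanding commands to drain
 * (or a controller lockup), then wait for the device to become ready.
 */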
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
	if (!rc) {
		/* incremented by sending the reset request */
		atomic_dec(&dev->commands_outstanding);
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->commands_outstanding) <= 0 ||
			lockup_detected(h));
	}

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (!rc)
		rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	/* Check the return code, as its sibling helpers above do. */
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle;
	u16 bmic_device_index = 0;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	encl_dev->eli =
		hpsa_get_enclosure_logical_identifier(h, scsi3addr);

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (encl_dev->target == -1 || encl_dev->lun == -1) {
		rc = IO_OK;
		goto out;
	}

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

static void hpsa_ext_ctrl_present(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev)
{
	u32 nphysicals;
	int i;

	if (h->discovery_polling)
		return;

	nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;

	for (i = 0; i < nphysicals; i++) {
		if (physdev->LUN[i].device_type ==
			BMIC_DEVICE_TYPE_CONTROLLER
			&& !is_hba_lunid(physdev->LUN[i].lunid)) {
			dev_info(&h->pdev->dev,
				"External controller present, activate discovery polling and disable rld caching\n");
			hpsa_disable_rld_caching(h);
			h->discovery_polling = 1;
			break;
		}
	}
}

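/*
 * Return true if the device's VPD "supported pages" page (0x00) lists
 * the requested page.
 */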
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}

/*
 * Called during a scan operation.
 * Sets ioaccel status on the new device list, not the existing device list.
 *
 * The device list used during I/O will be updated later in
 * adjust_hpsa_scsi_table.
 */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		bool offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		/*
		 * Check to see if offload can be enabled.
		 */
		if (offload_enabled) {
			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
			if (rc) /* could not load raid_map */
				goto out;
			this_device->offload_to_be_enabled = 1;
		}
	}

out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
					HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -EAGAIN;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -EIO;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			if (!h->legacy_board) {
				dev_err(&h->pdev->dev,
					"report luns requested format %u, got %u\n",
					extended_response,
					rld->extended_response_flag);
				rc = -EINVAL;
			} else
				rc = -EOPNOTSUPP;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	/* success, or an error other than "extended format unsupported" */
	if (!rc || rc != -EOPNOTSUPP)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		int i;
		u32 nphys;

		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)) < 0) {
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->bus, this_device->target,
			this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);
		rc = HPSA_LV_FAILED;
		goto bail_out;
	}

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
		    h->legacy_board) {
			/*
			 * Legacy boards might not support volume status
			 */
			dev_info(&h->pdev->dev,
				 "C0:T%d:L%d Volume status not available, assuming online.\n",
				 this_device->target, this_device->lun);
			volume_offline = 0;
		}
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		hpsa_turn_off_ioaccel_for_device(this_device);
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (this_device->external)
		this_device->queue_depth = EXTERNAL_QD;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 of inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time; physical
 * device target and lun are filled in later, after SAS discovery.
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * nphysicals and nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH;
}

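/*
 * Copy multipath information (active path index, redundant path map,
 * box/bay location and physical connector) from the BMIC
 * identify-physical-device buffer into the device structure.
 */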
static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle;

	if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
		return;

	rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

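/*
 * Ask the controller, via BMIC identify-controller, how many logical
 * drives are local to it.  A configured_logical_drive_count of 255 acts
 * as a sentinel meaning the real count is in the 16-bit
 * extended_logical_unit_count field.
 */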
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc) {
		if (id_ctlr->configured_logical_drive_count < 255)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
					id_ctlr->extended_logical_unit_count);
	} else {
		*nlocals = -1;
	}
	return rc;
}

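/*
 * Query BMIC identify-physical-device and test the spare bit (bit 6 of
 * more_flags) to decide whether this drive is a spare.  Allocation or
 * inquiry failures are treated as "not a spare".
 */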
static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}

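/*
 * Decide whether a masked physical device should be omitted from the
 * device table.  Non-disk masked devices are skipped unless they are
 * enclosures; unconfigured disks are kept, while configured spares are
 * skipped.
 */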
#define RPL_DEV_FLAG_NON_DISK				0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED	0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK			0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE	6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	/* Masked non-disk devices are skipped unless they are enclosures. */
	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;

	/*
	 * Configured spares are skipped here; a spare may be spun down,
	 * and negotiating with a spun-down drive during discovery would
	 * be needlessly slow.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}

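/*
 * Rebuild the driver's view of attached devices.  We may get notified
 * that devices have changed, so issue report-physical and report-logical
 * LUNs commands and adjust the device table accordingly.  A device's
 * scsi3addr does not change while the adapter is not reset, which lets
 * us tell known devices from new or vanished ones across rescans.
 */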
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Count the logical volumes local to this controller. */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	hpsa_ext_ctrl_present(h, physdev_list);

	/* Allocate the per-device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		memset(tmpdevice, 0, sizeof(*tmpdevice));

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (phys_dev_index >= 0 && !tmpdevice->external &&
			physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id, raid_map */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just the "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive,
			 * so only count it if the inquiry data identified
			 * it as an OBDR device.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode while the
				 * expose flag is set to 0.
				 */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the local Smart Array HBA as a RAID
			 * controller.  If it's a RAID controller other than
			 * the HBA itself (an external RAID controller or
			 * similar), don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}

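/*
 * Fill one hpsa scatter-gather descriptor from a mapped scatterlist
 * entry: 64-bit DMA address, 32-bit length, and a cleared extension
 * field (the chain/last flags are set by the caller).
 */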
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

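/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), does the DMA
 * mapping, and fills in the scatter-gather entries of the hpsa command,
 * cp.  When more SG entries are needed than fit in the command itself,
 * all but the last in-command slot are filled and the remainder go into
 * a separate chain block; SGTotal then counts the chain pointer as one
 * extra entry.
 */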
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

static inline void warn_zero_length_transfer(struct ctlr_info *h,
						u8 *cdb, int cdb_len,
						const char *func)
{
	dev_warn(&h->pdev->dev,
		 "%s: Blocking zero-length request: CDB:%*phN\n",
		 func, cdb_len, cdb);
}

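/*
 * IO_ACCEL_INELIGIBLE tells the caller that a request cannot go down
 * the I/O accelerator path and should be sent down the normal RAID
 * path instead.  Zero-length READ/WRITE/VERIFY transfers are weeded
 * out up front, apparently because the accelerator firmware does not
 * handle them.
 */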
#define IO_ACCEL_INELIGIBLE 1

static bool is_zero_length_transfer(u8 *cdb)
{
	u32 block_cnt;

	/* Block zero-length transfer sizes on certain commands. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
	case VERIFY:
	case WRITE_VERIFY:
		block_cnt = get_unaligned_be16(&cdb[7]);
		break;
	case READ_12:
	case WRITE_12:
	case VERIFY_12:
	case WRITE_VERIFY_12:
		block_cnt = get_unaligned_be32(&cdb[6]);
		break;
	case READ_16:
	case WRITE_16:
	case VERIFY_16:
		block_cnt = get_unaligned_be32(&cdb[10]);
		break;
	default:
		return false;
	}

	return block_cnt == 0;
}

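/*
 * Rewrite 6- and 12-byte READ/WRITE CDBs as 10-byte ones for the
 * ioaccel path.  A 6-byte CDB block count of 0 means 256 blocks; for
 * example, READ_6 with LBA 0x12345 and count 0 becomes READ_10 with
 * LBA 0x00012345 and count 256.  A 12-byte request whose count exceeds
 * 0xffff cannot be expressed in 10 bytes and is ruled
 * IO_ACCEL_INELIGIBLE.
 */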
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		fallthrough;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((cdb[1] & 0x1F) << 16) |
				(cdb[2] << 8) |
				cdb[3]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

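/*
 * Build and submit a command on the ioaccel1 (HP SSD Smart Path) path:
 * map the data buffer, fill in the io_accel1_cmd in the pre-allocated
 * pool slot matching c->cmdindex, and start I/O.  Requests the
 * accelerator cannot take (too many SG entries, zero-length transfer,
 * unconvertible CDB) return IO_ACCEL_INELIGIBLE so the caller can fall
 * back to the RAID path.
 */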
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

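/*
 * Queue a command directly to a device behind the controller, using
 * the device's ioaccel handle rather than a logical volume address.
 */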
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	if (!dev)
		return -1;

	c->phys_disk = dev;

	if (dev->in_reset)
		return -1;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

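/*
 * Set encryption parameters for an ioaccel2 request.  If the volume's
 * RAID map has encryption enabled, fill in the data encryption key
 * index, set the encrypt bit in the direction field, and derive the
 * per-request tweak from the starting LBA, normalized to 512-byte
 * blocks: for other volume block sizes the tweak is
 * (LBA * block size) / 512, split into 32-bit halves.
 */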
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device? */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case READ_6:
	case WRITE_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

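/*
 * Build and submit a command on the ioaccel2 path.  Unlike ioaccel1,
 * ioaccel2 supports chained SG lists here: when the mapped count
 * exceeds h->ioaccel_maxsg, the first in-command element becomes a
 * chain pointer (IOACCEL2_CHAIN) to a separate SG block, and the final
 * element is tagged IOACCEL2_LAST_SG.
 */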
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (!cmd->device)
		return -1;

	if (!cmd->device->hostdata)
		return -1;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = IOACCEL2_CHAIN;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		/*
		 * Set the last s/g element bit
		 */
		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else {
		cp->sg_count = (u8) use_sg;
	}

	if (phys_disk->in_reset) {
		cmd->result = DID_RESET << 16;
		return -1;
	}

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

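/*
 * Queue a command to the correct I/O accelerator path (ioaccel1 or
 * ioaccel2, depending on the controller's transport method).  The
 * per-disk ioaccel_cmds_out counter enforces the physical disk's queue
 * depth: if it would be exceeded, the request is bounced back as
 * IO_ACCEL_INELIGIBLE rather than queued.
 */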
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	if (phys_disk->in_reset)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

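/*
 * For mirrored layouts with more than two copies (e.g. 3-way R1-ADM),
 * walk map_index from its current mirror group to the group selected by
 * offload_to_mirror, stepping data_disks_per_row entries per group and
 * wrapping back to group 0 after the last group.  Example: with 3 data
 * disks per row and 3 mirror groups, map_index 1 (group 0) and
 * offload_to_mirror 1 yields map_index 4 (group 1).
 */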
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

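/*
 * Attempt to map a logical-volume request straight to a physical disk
 * using the volume's RAID map.  The LBA range is decoded from the CDB
 * and converted to (row, offset, column) coordinates in the stripe
 * layout; it must land in a single row and column (one strip) on one
 * disk.  Anything that straddles a strip boundary, writes to a level
 * other than RAID 0, or overruns the volume returns
 * IO_ACCEL_INELIGIBLE and falls back to the normal RAID path.
 */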
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	if (!dev)
		return -1;

	if (dev->in_reset)
		return -1;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		fallthrough;
	case READ_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
		fallthrough;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		fallthrough;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		fallthrough;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O stack */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if it's several rows, we're doing several stripes; no can do */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceed with calculation of the physical disk index */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break;
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members
		 * (2-drive R1 and R10 with an even number of drives).
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 2) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM) and R10 with a number
		 * of drives divisible by 3.
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 3) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv