1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/debugfs.h>
17#include <linux/skbuff.h>
18#include <linux/kthread.h>
19#include <scsi/scsi.h>
20#include <scsi/scsi_host.h>
21#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_device.h>
23
24#include "visorbus.h"
25#include "iochannel.h"
26
27
28
29#define IOS_ERROR_THRESHOLD 1000
30
31
32
33#define MAX_BUF 8192
34#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
35#define VISORHBA_ERROR_COUNT 30
36#define VISORHBA_OPEN_MAX 1
37
38static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
39 void (*visorhba_cmnd_done)
40 (struct scsi_cmnd *));
41#ifdef DEF_SCSI_QCMD
42static DEF_SCSI_QCMD(visorhba_queue_command)
43#else
44#define visorhba_queue_command visorhba_queue_command_lck
45#endif
46static int visorhba_probe(struct visor_device *dev);
47static void visorhba_remove(struct visor_device *dev);
48static int visorhba_pause(struct visor_device *dev,
49 visorbus_state_complete_func complete_func);
50static int visorhba_resume(struct visor_device *dev,
51 visorbus_state_complete_func complete_func);
52
53static ssize_t info_debugfs_read(struct file *file, char __user *buf,
54 size_t len, loff_t *offset);
55static int set_no_disk_inquiry_result(unsigned char *buf,
56 size_t len, bool is_lun0);
/* parent debugfs directory for this driver ("visorhba") */
static struct dentry *visorhba_debugfs_dir;

/* read-only debugfs "info" file; contents produced by info_debugfs_read() */
static const struct file_operations debugfs_info_fops = {
	.read = info_debugfs_read,
};
61
62
/* GUIDs for HBA channel types supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VHBA channel.
	 */
	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
	{ NULL_UUID_LE, NULL }
};
70
71
72
73
74
/* Tells the visor bus driver which device/channel types we handle and
 * which callbacks to invoke when such a device is attached, removed,
 * paused or resumed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	.channel_interrupt = NULL,	/* polling mode; no interrupt handler */
};
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
87
/* One disk (channel/id/lun) known to this HBA; nodes live on a
 * singly-linked list hanging off visorhba_devdata.head.
 */
struct visordisk_info {
	u32 valid;
	u32 channel, id, lun;	/* disk path */
	atomic_t ios_threshold;	/* successful I/Os left until error_count resets */
	atomic_t error_count;	/* recent error tally for this disk */
	struct visordisk_info *next;
};

/* One request outstanding to the IO partition. */
struct scsipending {
	struct uiscmdrsp cmdrsp;	/* command/response buffer */
	void *sent;			/* the data being tracked */
	char cmdtype;			/* type of pointer being stored */
};
101
102
/* Work item for a disk hot-add/remove notification from the IO
 * partition; queued on dar_work_queue_head, handled in work context.
 */
struct diskaddremove {
	u8 add;				/* 0-remove, 1-add */
	struct Scsi_Host *shost;	/* host the disk belongs to */
	u32 channel, id, lun;		/* disk path */
	struct diskaddremove *next;
};
109
110
/* Per-host driver state; allocated as the Scsi_Host's hostdata. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks requests that have been forwarded to the IOVM and
	 * haven't returned yet.
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* start the search for the next free pending[] slot here */
	unsigned int nextinsert;
	spinlock_t privlock;		/* protects pending[] and nextinsert */
	bool serverdown;		/* IO partition is gone */
	bool serverchangingstate;	/* pause/resume transition in flight */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;	/* wakes process_incoming_rsps() */
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;	/* dummy head of the vdisk list */
	unsigned int max_buff_len;	/* largest bufflen seen (debugfs stat) */
	int devnum;
	struct task_struct *thread;	/* response-draining kthread */
	int thread_wait_ms;		/* poll interval for that thread */
};

struct visorhba_devdata_open -- wrapper so hosts can be listed for debugfs */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};

/* hosts registered here are included in the debugfs "info" dump */
static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
143
/* Walk the visordisk_info list rooted at list->head, running the
 * following statement for every node whose channel/id/lun equal
 * match's.  Expands to a for+if, so the statement after the macro
 * executes only for matching nodes.
 * NOTE(review): iteration starts at the embedded head node and the loop
 * condition is iter->next, so the head's (zero-initialized) fields are
 * compared while the final node is never visited -- confirm this is the
 * intended matching semantics.
 */
#define for_each_vdisk_match(iter, list, match) \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) && \
		    (iter->id == match->id) && \
		    (iter->lun == match->lun))
149
150
151
152
153
154
155
156
157
158
159
160static struct task_struct *visor_thread_start
161(int (*threadfn)(void *), void *thrcontext, char *name)
162{
163 struct task_struct *task;
164
165 task = kthread_run(threadfn, thrcontext, "%s", name);
166 if (IS_ERR(task)) {
167 pr_err("visorbus failed to start thread\n");
168 return NULL;
169 }
170 return task;
171}
172
173
174
175
/*
 * visor_thread_stop - stop a thread started by visor_thread_start
 * @task: task to stop; NULL is tolerated and ignored
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (task)
		kthread_stop(task);
}
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196static int add_scsipending_entry(struct visorhba_devdata *devdata,
197 char cmdtype, void *new)
198{
199 unsigned long flags;
200 struct scsipending *entry;
201 int insert_location;
202
203 spin_lock_irqsave(&devdata->privlock, flags);
204 insert_location = devdata->nextinsert;
205 while (devdata->pending[insert_location].sent) {
206 insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
207 if (insert_location == (int)devdata->nextinsert) {
208 spin_unlock_irqrestore(&devdata->privlock, flags);
209 return -1;
210 }
211 }
212
213 entry = &devdata->pending[insert_location];
214 memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
215 entry->cmdtype = cmdtype;
216 if (new)
217 entry->sent = new;
218 else
219 entry->sent = &entry->cmdrsp;
220 devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
221 spin_unlock_irqrestore(&devdata->privlock, flags);
222
223 return insert_location;
224}
225
226
227
228
229
230
231
232
233
234static void *del_scsipending_ent(struct visorhba_devdata *devdata,
235 int del)
236{
237 unsigned long flags;
238 void *sent;
239
240 if (del >= MAX_PENDING_REQUESTS)
241 return NULL;
242
243 spin_lock_irqsave(&devdata->privlock, flags);
244 sent = devdata->pending[del].sent;
245
246 devdata->pending[del].cmdtype = 0;
247 devdata->pending[del].sent = NULL;
248 spin_unlock_irqrestore(&devdata->privlock, flags);
249
250 return sent;
251}
252
253
254
255
256
257
258
259
260
261
262static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
263 int ent)
264{
265 if (ddata->pending[ent].sent)
266 return &ddata->pending[ent].cmdrsp;
267
268 return NULL;
269}
270
271
272
273
274
275
276
277
278
279
280
/*
 * forward_taskmgmt_command - send a task-management request
 * (abort/reset) to the IO partition and wait for its answer
 * @tasktype: type of request (TASK_MGMT_ABORT_TASK, ...)
 * @scsicmd: command whose device identifies the target channel/id/lun
 *
 * Return: SUCCESS if the request was accepted and answered within 45
 * seconds, FAILED otherwise.
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_cmnd *scsicmd)
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel: 0xffff == no answer yet */
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	/* nothing can be forwarded while the IO partition is down or
	 * changing state
	 */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending[] slot; its index is the handle the IO
	 * partition echoes back in the response
	 */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;

	/* NOTE(review): both handles are addresses of on-stack objects;
	 * if the wait below times out, a late completion (or serverdown
	 * processing) could touch a dead stack frame -- confirm the
	 * completion path can never outlive this function
	 */
	cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
	cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition a while to complete an IO in
	 * some cases, so wait up to 45 seconds before giving up
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	if (tasktype == TASK_MGMT_ABORT_TASK)
		scsicmd->result = DID_ABORT << 16;
	else
		scsicmd->result = DID_RESET << 16;

	scsicmd->scsi_done(scsicmd);

	return SUCCESS;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, scsicmd_id);
	return FAILED;
}
343
344
345
346
347
348
349
350
351static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
352{
353
354 struct scsi_device *scsidev;
355 struct visordisk_info *vdisk;
356 struct visorhba_devdata *devdata;
357
358 scsidev = scsicmd->device;
359 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
360 for_each_vdisk_match(vdisk, devdata, scsidev) {
361 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
362 atomic_inc(&vdisk->error_count);
363 else
364 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
365 }
366 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
367}
368
369
370
371
372
373
374
375static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
376{
377
378 struct scsi_device *scsidev;
379 struct visordisk_info *vdisk;
380 struct visorhba_devdata *devdata;
381
382 scsidev = scsicmd->device;
383 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
384 for_each_vdisk_match(vdisk, devdata, scsidev) {
385 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
386 atomic_inc(&vdisk->error_count);
387 else
388 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
389 }
390 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
391}
392
393
394
395
396
397
398
399
400static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
401{
402 struct scsi_device *scsidev;
403 struct visordisk_info *vdisk;
404 struct visorhba_devdata *devdata;
405
406 scsidev = scsicmd->device;
407 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
408 for_each_vdisk_match(vdisk, devdata, scsidev) {
409 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
410 atomic_inc(&vdisk->error_count);
411 else
412 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
413 }
414 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
415}
416
417
418
419
420
421
422
423
/*
 * visorhba_host_reset_handler - midlayer requested a host reset
 * @scsicmd: command that triggered the reset
 *
 * Not implemented: reports SUCCESS without doing anything.
 */
static int
visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* TODO: implement or forward a real host reset */
	return SUCCESS;
}
430
431
432
433
434
435
436
/*
 * visorhba_get_info - return the host-info string the midlayer shows
 * (e.g. via /proc/scsi)
 * @shp: host being queried (unused)
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* a fixed name suffices; no per-host detail is reported */
	return "visorhba";
}
442
443
444
445
446
447
448
449
450
451
452
453
/*
 * visorhba_queue_command_lck - queue a command for the IO partition
 * @scsicmd: command from the midlayer
 * @visorhba_cmnd_done: completion callback to invoke when done
 *
 * Builds a uiscmdrsp in a reserved pending[] slot, copies the CDB and
 * the scatter/gather list into it, and inserts it on the channel queue
 * to the IO partition.
 *
 * Return: 0 on success, SCSI_MLQUEUE_DEVICE_BUSY otherwise.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* no new work while the IO partition is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* the pending index doubles as the handle the IO partition echoes
	 * back in its response, letting drain_queue() find this command
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;

	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	/* NOTE(review): copies MAX_CMND_SIZE bytes regardless of the
	 * command's actual cmd_len -- confirm the IO partition ignores
	 * the trailing bytes
	 */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* track the largest buffer seen so far (reported via debugfs) */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert the scatterlist into guest physical address/length
	 * pairs the IO partition can use
	 */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		/* channel queue could not accept the request */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
527
528
529
530
531
532
533
534
535
536
/*
 * visorhba_slave_alloc - midlayer discovered a new device
 * @scsidev: the new scsi device
 *
 * Appends a visordisk_info node for the device to the host's list
 * unless a node with the same channel/id/lun already exists.
 *
 * Return: 0 on success or when already tracked, -ENOMEM on allocation
 * failure.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed
	 */
	struct visordisk_info *vdisk;
	struct visordisk_info *tmpvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0;

	/* already tracked: nothing to do */
	for_each_vdisk_match(vdisk, devdata, scsidev)
		return 0;

	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
	if (!tmpvdisk)
		return -ENOMEM;

	tmpvdisk->channel = scsidev->channel;
	tmpvdisk->id = scsidev->id;
	tmpvdisk->lun = scsidev->lun;
	/* NOTE(review): appended after the node the loop stopped on,
	 * without any lock -- confirm slave_alloc/slave_destroy cannot
	 * race on this list
	 */
	vdisk->next = tmpvdisk;
	return 0;
}
564
565
566
567
568
569
570
571
/*
 * visorhba_slave_destroy - a device is going away; release its tracking
 * @scsidev: scsi device going away
 *
 * Unlinks and frees the visordisk_info node for the device.
 */
static void visorhba_slave_destroy(struct scsi_device *scsidev)
{
	/* midlevel calls this after device has been quiesced and
	 * before it is to be deleted
	 */
	struct visordisk_info *vdisk, *delvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		/* NOTE(review): this unlinks and frees vdisk->next, i.e.
		 * the node AFTER the one the macro matched -- verify the
		 * intended node is freed, and note the lack of locking
		 */
		delvdisk = vdisk->next;
		vdisk->next = delvdisk->next;
		kfree(delvdisk);
		return;
	}
}
589
/* SCSI host template the midlayer copies for each visorhba host */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,	/* overridden in visorhba_probe() from channel config */
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
607
608
609
610
611
612
613
614
615
616
617
618
/*
 * info_debugfs_read - debugfs "info" read handler; dumps per-host stats
 * @file: debugfs file (unused)
 * @buf: userspace buffer to fill
 * @len: size of @buf
 * @offset: file offset, managed by simple_read_from_buffer()
 *
 * Formats counters for every host recorded in visorhbas_open[] into a
 * temporary kernel buffer (at most MAX_BUF bytes) and copies the result
 * to userspace.
 *
 * Return: number of bytes read, or a negative errno.
 */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	ssize_t bytes_read = 0;
	int str_pos = 0;
	u64 phys_flags_addr;
	int i;
	struct visorhba_devdata *devdata;
	char *vbuf;

	/* clamp the scratch buffer to a sane size */
	if (len > MAX_BUF)
		len = MAX_BUF;
	vbuf = kzalloc(len, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata)
			continue;

		devdata = visorhbas_open[i].devdata;

		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "max_buff_len:%u\n",
				devdata->max_buff_len);

		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
				devdata->interrupts_rcvd,
				devdata->interrupts_disabled);
		str_pos += scnprintf(vbuf + str_pos,
				len - str_pos, "\ninterrupts_notme = %llu,\n",
				devdata->interrupts_notme);
		phys_flags_addr = virt_to_phys((__force void *)
					       devdata->flags_addr);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				"flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
				devdata->flags_addr, phys_flags_addr,
				(__le64)readq(devdata->flags_addr));
		str_pos += scnprintf(vbuf + str_pos,
			len - str_pos, "acquire_failed_cnt:%llu\n",
			devdata->acquire_failed_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
	}

	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
	kfree(vbuf);
	return bytes_read;
}
668
669
670
671
672
673
674
675
676
677
/*
 * visorhba_serverdown_complete - the IO partition has gone away; fail
 * every outstanding request
 * @devdata: device being torn down
 *
 * Stops the response-draining thread, then walks pending[] completing
 * every in-flight command: SCSI commands are completed with DID_RESET,
 * taskmgmt/vdiskmgmt waiters are woken with a FAILED notification.
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			/* NOTE(review): stores FAILED into the handle
			 * field itself, not through the int pointer it
			 * holds; the waiter in forward_taskmgmt_command()
			 * tests its local notifyresult, so it only exits
			 * via its timeout -- confirm this is intended
			 */
			cmdrsp->scsitaskmgmt.notifyresult_handle
							= TASK_MGMT_FAILED;
			wake_up_all((wait_queue_head_t *)
				    cmdrsp->scsitaskmgmt.notify_handle);
			break;
		case CMD_VDISKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			cmdrsp->vdiskmgmt.notifyresult_handle
							= VDISK_MGMT_FAILED;
			wake_up_all((wait_queue_head_t *)
				    cmdrsp->vdiskmgmt.notify_handle);
			break;
		default:
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
727
728
729
730
731
732
733
734
735
736static int visorhba_serverdown(struct visorhba_devdata *devdata)
737{
738 if (!devdata->serverdown && !devdata->serverchangingstate) {
739 devdata->serverchangingstate = true;
740 visorhba_serverdown_complete(devdata);
741 } else if (devdata->serverchangingstate) {
742 return -EINVAL;
743 }
744 return 0;
745}
746
747
748
749
750
751
752
753
754
/*
 * do_scsi_linuxstat - the IO partition returned a linuxstat error;
 * propagate sense data and tally errors against the disk
 * @cmdrsp: response from the IO partition
 * @scsicmd: command the response belongs to
 */
static void
do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct visorhba_devdata *devdata;
	struct visordisk_info *vdisk;
	struct scsi_device *scsidev;

	scsidev = scsicmd->device;
	/* hand the sense data back to the midlayer */
	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);

	/* do not count selection timeouts on INQUIRY: those are expected
	 * while scanning for devices that aren't there
	 */
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
		return;

	/* charge the error to the matching disk */
	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
			atomic_inc(&vdisk->error_count);
			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
		}
	}
}
779
780static int set_no_disk_inquiry_result(unsigned char *buf,
781 size_t len, bool is_lun0)
782{
783 if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
784 return -EINVAL;
785 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
786 buf[2] = SCSI_SPC2_VER;
787 if (is_lun0) {
788 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
789 buf[3] = DEV_HISUPPORT;
790 } else {
791 buf[0] = DEV_NOT_CAPABLE;
792 }
793 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
794 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
795 return 0;
796}
797
798
799
800
801
802
803
804
805
/*
 * do_scsi_nolinuxstat - the IO partition completed the command without
 * error; apply fixups and bookkeeping
 * @cmdrsp: response from the IO partition
 * @scsicmd: command the response belongs to
 *
 * For a successful INQUIRY flagged no_disk_result, substitutes a
 * pseudo-device inquiry payload; otherwise winds down the per-disk
 * error statistics.
 */
static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char buf[36];
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	int bufind = 0; /* NOTE(review): never advanced in the loop below */
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* Linux scsi code wants a device at Lun 0
		 * to issue report luns, but we don't want
		 * a disk there so we'll present a processor
		 * there.
		 */
		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
					   scsidev->lun == 0);

		if (scsi_sg_count(scsicmd) == 0) {
			/* NOTE(review): copies into the scatterlist
			 * pointer itself rather than a data buffer --
			 * confirm what scsi_sglist() points at when
			 * sg_count is 0
			 */
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			return;
		}

		/* NOTE(review): indexes the scatterlist as a flat array
		 * (sg + i) and always copies from buf + 0; buf is only
		 * 36 bytes while sg[i].length may be larger -- confirm
		 */
		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
	} else {
		/* a clean IO: wind the threshold down, and once it hits
		 * zero forgive past errors
		 */
		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
		for_each_vdisk_match(vdisk, devdata, scsidev) {
			if (atomic_read(&vdisk->ios_threshold) > 0) {
				atomic_dec(&vdisk->ios_threshold);
				if (atomic_read(&vdisk->ios_threshold) == 0)
					atomic_set(&vdisk->error_count, 0);
			}
		}
	}
}
858
859
860
861
862
863
864
865
866
867
868static void
869complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
870{
871
872 scsicmd->result = cmdrsp->scsi.linuxstat;
873 if (cmdrsp->scsi.linuxstat)
874 do_scsi_linuxstat(cmdrsp, scsicmd);
875 else
876 do_scsi_nolinuxstat(cmdrsp, scsicmd);
877
878 scsicmd->scsi_done(scsicmd);
879}
880
881
/*
 * complete_vdiskmgmt_command - a vdisk management response arrived;
 * record the result and wake its waiter
 * @cmdrsp: response from the IO partition
 */
static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
{
	/* copy the result of the vdisk mgmt command for the waiter.
	 * NOTE(review): this overwrites the handle field with the result
	 * instead of storing through the pointer it carries -- confirm
	 * the (not visible here) waiter reads notifyresult_handle itself
	 */
	cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
	wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
}
890
891
892
893
894
895
896
897
898
899static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
900{
901
902
903
904 cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
905 wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
906}
907
/* Deferred disk add/remove machinery: a singly-linked list of pending
 * diskaddremove entries, protected by dar_work_queue_lock and drained
 * by the dar_work_queue work item (handler not visible in this chunk --
 * TODO confirm).  dar_work_queue_sched notes whether the work is
 * already scheduled.
 */
static struct work_struct dar_work_queue;
static struct diskaddremove *dar_work_queue_head;
static spinlock_t dar_work_queue_lock;
static unsigned short dar_work_queue_sched;
912
913
914
915
916
917
918
919
920static inline void queue_disk_add_remove(struct diskaddremove *dar)
921{
922 unsigned long flags;
923
924 spin_lock_irqsave(&dar_work_queue_lock, flags);
925 if (!dar_work_queue_head) {
926 dar_work_queue_head = dar;
927 dar->next = NULL;
928 } else {
929 dar->next = dar_work_queue_head;
930 dar_work_queue_head = dar;
931 }
932 if (!dar_work_queue_sched) {
933 schedule_work(&dar_work_queue);
934 dar_work_queue_sched = 1;
935 }
936 spin_unlock_irqrestore(&dar_work_queue_lock, flags);
937}
938
939
940
941
942
943
944
945
946
947static void process_disk_notify(struct Scsi_Host *shost,
948 struct uiscmdrsp *cmdrsp)
949{
950 struct diskaddremove *dar;
951
952 dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
953 if (!dar)
954 return;
955
956 dar->add = cmdrsp->disknotify.add;
957 dar->shost = shost;
958 dar->channel = cmdrsp->disknotify.channel;
959 dar->id = cmdrsp->disknotify.id;
960 dar->lun = cmdrsp->disknotify.lun;
961 queue_disk_add_remove(dar);
962}
963
964
965
966
967
968
969
970
971
/*
 * drain_queue - pull responses off the IOVM channel and dispatch them
 * @cmdrsp: scratch buffer each response is copied into
 * @devdata: device owning the channel
 *
 * Loops until visorchannel_signalremove() reports the queue empty, or
 * until a response references a pending[] slot that is no longer in use
 * (e.g. already failed by serverdown processing).
 */
static void
drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;
	struct Scsi_Host *shost = devdata->scsihost;

	while (1) {
		if (!visorchannel_signalremove(devdata->dev->visorchannel,
					       IOCHAN_FROM_IOPART,
					       cmdrsp))
			break; /* queue empty */

		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* the handle is the pending[] index reserved when
			 * the command was queued.
			 * NOTE(review): it comes straight from the IO
			 * partition -- confirm it is range-checked before
			 * being used as an array index
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			if (!scsicmd)
				break;
			/* complete the original command */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
			/* The vHba pointer has no meaning in a
			 * guest partition. Let's be safe and set it
			 * to NULL now. Do not use it here!
			 */
			cmdrsp->disknotify.v_hba = NULL;
			process_disk_notify(shost, cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->vdiskmgmt.handle))
				break;
			complete_vdiskmgmt_command(cmdrsp);
		}
		/* cmdrsp is now available for reuse */
	}
}
1015
1016
1017
1018
1019
1020
1021
1022
1023
/*
 * process_incoming_rsps - kthread main loop; drains responses from the
 * IO partition
 * @v: the visorhba_devdata for this host
 *
 * Wakes when signalled via rsp_queue/interrupt_rcvd or after
 * thread_wait_ms milliseconds, and drains the response queue each time.
 * Exits when kthread_stop() is called.
 *
 * Return: 0, or -ENOMEM if the scratch response buffer cannot be
 * allocated.
 */
static int process_incoming_rsps(void *v)
{
	struct visorhba_devdata *devdata = v;
	struct uiscmdrsp *cmdrsp = NULL;
	const int size = sizeof(*cmdrsp);

	/* NOTE(review): this runs in process context where GFP_KERNEL
	 * would normally suffice -- confirm why GFP_ATOMIC was chosen
	 */
	cmdrsp = kmalloc(size, GFP_ATOMIC);
	if (!cmdrsp)
		return -ENOMEM;

	while (1) {
		if (kthread_should_stop())
			break;
		/* poll: wait for a wakeup or for thread_wait_ms to pass */
		wait_event_interruptible_timeout(
			devdata->rsp_queue, (atomic_read(
					     &devdata->interrupt_rcvd) == 1),
			msecs_to_jiffies(devdata->thread_wait_ms));
		/* drain queue */
		drain_queue(cmdrsp, devdata);
	}
	kfree(cmdrsp);
	return 0;
}
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058static int visorhba_pause(struct visor_device *dev,
1059 visorbus_state_complete_func complete_func)
1060{
1061 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1062
1063 visorhba_serverdown(devdata);
1064 complete_func(dev, 0);
1065 return 0;
1066}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077static int visorhba_resume(struct visor_device *dev,
1078 visorbus_state_complete_func complete_func)
1079{
1080 struct visorhba_devdata *devdata;
1081
1082 devdata = dev_get_drvdata(&dev->device);
1083 if (!devdata)
1084 return -EINVAL;
1085
1086 if (devdata->serverdown && !devdata->serverchangingstate)
1087 devdata->serverchangingstate = true;
1088
1089 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1090 "vhba_incming");
1091
1092 devdata->serverdown = false;
1093 devdata->serverchangingstate = false;
1094
1095 return 0;
1096}
1097
1098
1099
1100
1101
1102
1103
1104
/*
 * visorhba_probe - a vHBA device was discovered; bring it up
 * @dev: the discovered visor_device
 *
 * Allocates and registers a Scsi_Host sized from the channel's
 * vhba.max configuration, records the host for the debugfs "info"
 * dump, switches the channel to polling mode and starts the
 * response-draining thread.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int i, err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* read the channel's advertised limits */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned)max.max_id;
	scsihost->max_lun = (unsigned)max.max_lun;
	scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors is in 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	/* record this host for the debugfs "info" dump.
	 * NOTE(review): if all VISORHBA_OPEN_MAX slots are taken the
	 * host is silently not recorded -- confirm one slot is enough
	 */
	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata) {
			visorhbas_open[i].devdata = devdata;
			break;
		}
	}

	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* advertise that this driver polls the channel rather than
	 * relying on interrupts
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;

	devdata->thread_wait_ms = 2;
	/* NOTE(review): visor_thread_start() may return NULL; the result
	 * is not checked here -- confirm whether probe should fail then
	 */
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1180
1181
1182
1183
1184
1185
1186
1187
1188static void visorhba_remove(struct visor_device *dev)
1189{
1190 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1191 struct Scsi_Host *scsihost = NULL;
1192
1193 if (!devdata)
1194 return;
1195
1196 scsihost = devdata->scsihost;
1197 visor_thread_stop(devdata->thread);
1198 scsi_remove_host(scsihost);
1199 scsi_host_put(scsihost);
1200
1201 dev_set_drvdata(&dev->device, NULL);
1202}
1203
1204
1205
1206
1207
1208
1209
1210static int visorhba_init(void)
1211{
1212 struct dentry *ret;
1213 int rc = -ENOMEM;
1214
1215 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1216 if (!visorhba_debugfs_dir)
1217 return -ENOMEM;
1218
1219 ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
1220 &debugfs_info_fops);
1221
1222 if (!ret) {
1223 rc = -EIO;
1224 goto cleanup_debugfs;
1225 }
1226
1227 rc = visorbus_register_visor_driver(&visorhba_driver);
1228 if (rc)
1229 goto cleanup_debugfs;
1230
1231 return rc;
1232
1233cleanup_debugfs:
1234 debugfs_remove_recursive(visorhba_debugfs_dir);
1235
1236 return rc;
1237}
1238
1239
1240
1241
1242
1243
/*
 * visorhba_exit - module exit point: unregister from the visor bus
 * first (so no new devices arrive), then remove the debugfs tree
 * created in visorhba_init()
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1249
1250module_init(visorhba_init);
1251module_exit(visorhba_exit);
1252
1253MODULE_AUTHOR("Unisys");
1254MODULE_LICENSE("GPL");
1255MODULE_DESCRIPTION("s-Par hba driver");
1256