1
2
3
4
5
6
7#include <linux/debugfs.h>
8#include <linux/kthread.h>
9#include <linux/idr.h>
10#include <linux/module.h>
11#include <linux/seq_file.h>
12#include <linux/visorbus.h>
13#include <scsi/scsi.h>
14#include <scsi/scsi_host.h>
15#include <scsi/scsi_cmnd.h>
16#include <scsi/scsi_device.h>
17
18#include "iochannel.h"
19
20
21
22#define IOS_ERROR_THRESHOLD 1000
23#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
24#define VISORHBA_ERROR_COUNT 30
25
26static struct dentry *visorhba_debugfs_dir;
27
28
/* Channel types this driver binds to: the s-Par virtual HBA channel. */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
	  VISOR_VHBA_CHANNEL_VERSIONID },
	{}
};
37
38MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
39MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
40
/* Per-disk state, stored in scsi_device->hostdata by visorhba_slave_alloc(). */
struct visordisk_info {
	struct scsi_device *sdev;	/* back-pointer to the owning device */
	u32 valid;
	/* ios_threshold: once errors accumulate, counts down successful
	 * completions before error_count is cleared (see do_scsi_nolinuxstat).
	 */
	atomic_t ios_threshold;
	/* error_count: bumped by eh handlers and error completions, capped
	 * at VISORHBA_ERROR_COUNT.
	 */
	atomic_t error_count;
	struct visordisk_info *next;	/* NOTE(review): appears unused in this file */
};
48
/* One outstanding-request slot; the slot index is the handle sent to the
 * IO partition.
 */
struct scsipending {
	struct uiscmdrsp cmdrsp;	/* command/response block for this slot */
	/* The Data being tracked: the scsi_cmnd for SCSI commands, or
	 * &cmdrsp itself for task-management requests.  NULL == slot free.
	 */
	void *sent;
	/* Type of pointer that is being stored: CMD_SCSI_TYPE or
	 * CMD_SCSITASKMGMT_TYPE.
	 */
	char cmdtype;
};
56
57
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to the IOVM and
	 * haven't returned yet; a slot is busy while .sent != NULL.
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock;		/* guards pending[] and the idr */
	bool serverdown;		/* IO partition is down */
	bool serverchangingstate;	/* pause/resume transition in flight */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;	/* channel feature-flags location */
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;	/* wakes process_incoming_rsps() */
	struct visordisk_info head;
	unsigned int max_buff_len;	/* largest bufflen seen (debugfs stat) */
	int devnum;
	struct task_struct *thread;	/* response-draining kthread */
	int thread_wait_ms;		/* poll interval for that thread */

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
94
/* Wrapper for handing devdata around as an opaque cookie.
 * NOTE(review): not referenced anywhere in this file -- candidate for removal.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
98
99
100
101
102
103
104
105
106
107
108
109
/*
 * visor_thread_start - starts a thread for the device
 * @threadfn:   function the thread starts
 * @thrcontext: context to pass to the thread, i.e. devdata
 * @name:       name of the thread
 *
 * Starts a thread for the device.
 *
 * Return: the task_struct on success, or NULL on failure (callers must
 * check -- passing NULL to visor_thread_stop() would oops).
 */
static struct task_struct *visor_thread_start(int (*threadfn)(void *),
					      void *thrcontext, char *name)
{
	struct task_struct *task;

	task = kthread_run(threadfn, thrcontext, "%s", name);
	if (IS_ERR(task)) {
		pr_err("visorbus failed to start thread\n");
		return NULL;
	}
	return task;
}
122
123
124
125
126
/*
 * visor_thread_stop - stop a thread started by visor_thread_start
 * @task: task to stop (must be non-NULL); kthread_stop() blocks until
 *        the thread function has returned
 */
static void visor_thread_stop(struct task_struct *task)
{
	kthread_stop(task);
}
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
/*
 * add_scsipending_entry - save off io command that is pending to
 *			   Service Partition
 * @devdata: Pointer to devdata
 * @cmdtype: Specifies the type of command pending (CMD_SCSI_TYPE or
 *	     CMD_SCSITASKMGMT_TYPE)
 * @new:     The command to be saved; NULL means the slot's own cmdrsp
 *	     is recorded as "sent" instead (task-management case)
 *
 * Saves off the io command that is being handled by the Service
 * Partition so that it can be handled when it completes. Scans the
 * pending[] ring for a free slot starting at nextinsert.
 *
 * Return: insert_location (the handle passed to the IO partition) on
 *	   success, or -EBUSY if every slot is occupied.
 */
static int add_scsipending_entry(struct visorhba_devdata *devdata,
				 char cmdtype, void *new)
{
	unsigned long flags;
	struct scsipending *entry;
	int insert_location;

	spin_lock_irqsave(&devdata->privlock, flags);
	insert_location = devdata->nextinsert;
	/* a slot is free iff .sent is NULL; wrapping back around to
	 * nextinsert means the ring is full
	 */
	while (devdata->pending[insert_location].sent) {
		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
		if (insert_location == (int)devdata->nextinsert) {
			spin_unlock_irqrestore(&devdata->privlock, flags);
			return -EBUSY;
		}
	}

	entry = &devdata->pending[insert_location];
	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
	entry->cmdtype = cmdtype;
	if (new)
		entry->sent = new;
	/* wants to send cmdrsp, so mark the slot busy with its own cmdrsp */
	else
		entry->sent = &entry->cmdrsp;
	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
	spin_unlock_irqrestore(&devdata->privlock, flags);

	return insert_location;
}
176
177
178
179
180
181
182
183
184
185
186static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
187{
188 unsigned long flags;
189 void *sent;
190
191 if (del >= MAX_PENDING_REQUESTS)
192 return NULL;
193
194 spin_lock_irqsave(&devdata->privlock, flags);
195 sent = devdata->pending[del].sent;
196 devdata->pending[del].cmdtype = 0;
197 devdata->pending[del].sent = NULL;
198 spin_unlock_irqrestore(&devdata->privlock, flags);
199
200 return sent;
201}
202
203
204
205
206
207
208
209
210
211
212
213static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
214 int ent)
215{
216 if (ddata->pending[ent].sent)
217 return &ddata->pending[ent].cmdrsp;
218
219 return NULL;
220}
221
222
223
224
225
226
227
228
229
230
231
232
/*
 * simple_idr_get - associate a provided pointer with an int ID
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @p:        The pointer value to be remembered
 * @lock:     A spinlock used when exercising the data object
 *
 * Allocates a handle in [1, INT_MAX); 0 is never handed out, so the
 * rest of the driver can treat a zero handle as "none".  idr_preload()
 * lets the GFP_NOWAIT allocation under the spinlock draw from a
 * preallocated per-cpu pool.
 *
 * Return: the id, or 0 on allocation failure.
 */
static unsigned int simple_idr_get(struct idr *idrtable, void *p,
				   spinlock_t *lock)
{
	int id;
	unsigned long flags;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(lock, flags);
	id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
	spin_unlock_irqrestore(lock, flags);
	idr_preload_end();

	/* failure */
	if (id < 0)
		return 0;

	return (unsigned int)(id);
}
250
251
252
253
254
255
256
257
258
259
260
261
/*
 * setup_scsitaskmgmt_handles - stash the necessary handles so that the
 *				completion processing logic for a taskmgmt
 *				cmd will be able to find who to wake up
 *				and where to stash the result
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @lock:     A spinlock used when exercising the data object
 * @cmdrsp:   Response from the IO partition
 * @event:    The event handle to associate with an id
 * @result:   The location for the result of the event handle
 *
 * A zero handle (idr exhaustion) is tolerated; completion will then
 * log an error and let the initiator time out.
 */
static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
				       struct uiscmdrsp *cmdrsp,
				       wait_queue_head_t *event, int *result)
{
	/* specify the event that has to be triggered when this
	 * cmd is complete
	 */
	cmdrsp->scsitaskmgmt.notify_handle =
		simple_idr_get(idrtable, event, lock);
	cmdrsp->scsitaskmgmt.notifyresult_handle =
		simple_idr_get(idrtable, result, lock);
}
273
274
275
276
277
278
279
/*
 * cleanup_scsitaskmgmt_handles - forget handles created by
 *				  setup_scsitaskmgmt_handles()
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @cmdrsp:   Response from the IO partition
 *
 * Zero handles mean "never allocated" and are skipped.
 */
static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
					 struct uiscmdrsp *cmdrsp)
{
	if (cmdrsp->scsitaskmgmt.notify_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	if (cmdrsp->scsitaskmgmt.notifyresult_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
288
289
290
291
292
293
294
295
296
297
298
299
/*
 * forward_taskmgmt_command - send taskmgmt command to the Service Partition
 * @tasktype: Type of taskmgmt command
 * @scsidev:  Scsidev that issued command
 *
 * Create a cmdrsp packet and send it to the Service Partition that will
 * service this request, then sleep until the IO partition signals
 * completion (or give up after 45 seconds).
 *
 * Return: SUCCESS or FAILED (SCSI eh return values).
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel: "no result yet" */
	wait_queue_head_t notifyevent;
	int scsicmd_id;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* NOTE(review): notifyevent/notifyresult live on this stack frame;
	 * the idr handles are removed on every exit path below so a late
	 * completion cannot wake a dead waitqueue -- confirm there is no
	 * window between wait timeout and cleanup.
	 */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
361
362
363
364
365
366
367
/*
 * visorhba_abort_handler - middle level interface calls this to request
 *			    that a specific command be aborted
 * @scsicmd: The scsicmd that needs aborted
 *
 * Bumps the per-disk error accounting, then forwards an ABORT_TASK to
 * the IO partition.  On SUCCESS the command is completed with DID_ABORT.
 *
 * Return: SUCCESS if inserted, FAILED otherwise.
 */
static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_ABORT_TASK */
	struct scsi_device *scsidev;
	struct visordisk_info *vdisk;
	int rtn;

	scsidev = scsicmd->device;
	vdisk = scsidev->hostdata;
	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
		atomic_inc(&vdisk->error_count);
	else
		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
	if (rtn == SUCCESS) {
		scsicmd->result = DID_ABORT << 16;
		scsicmd->scsi_done(scsicmd);
	}
	return rtn;
}
388
389
390
391
392
393
394
/*
 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET to the device
 *				   specified by the scsicmd
 * @scsicmd: The scsicmd that needs aborted
 *
 * Bumps the per-disk error accounting, then forwards a LUN reset to the
 * IO partition.  On SUCCESS the command is completed with DID_RESET.
 *
 * Return: SUCCESS if inserted, FAILED otherwise.
 */
static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_LUN_RESET */
	struct scsi_device *scsidev;
	struct visordisk_info *vdisk;
	int rtn;

	scsidev = scsicmd->device;
	vdisk = scsidev->hostdata;
	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
		atomic_inc(&vdisk->error_count);
	else
		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
	if (rtn == SUCCESS) {
		scsicmd->result = DID_RESET << 16;
		scsicmd->scsi_done(scsicmd);
	}
	return rtn;
}
415
416
417
418
419
420
421
422
423static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
424{
425 struct scsi_device *scsidev;
426 struct visordisk_info *vdisk;
427 int rtn;
428
429 scsidev = scsicmd->device;
430 shost_for_each_device(scsidev, scsidev->host) {
431 vdisk = scsidev->hostdata;
432 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
433 atomic_inc(&vdisk->error_count);
434 else
435 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
436 }
437 rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
438 if (rtn == SUCCESS) {
439 scsicmd->result = DID_RESET << 16;
440 scsicmd->scsi_done(scsicmd);
441 }
442 return rtn;
443}
444
445
446
447
448
449
450
/*
 * visorhba_host_reset_handler - Not supported
 * @scsicmd: The scsicmd that needs to be aborted
 *
 * Stub: reports SUCCESS without contacting the IO partition, letting
 * the midlayer continue its recovery sequence.
 *
 * Return: SUCCESS unconditionally.
 */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	return SUCCESS;
}
456
457
458
459
460
461
462
/*
 * visorhba_get_info - Get information about SCSI device
 * @shp: Scsi host that is requesting information (unused)
 *
 * Return: static String with driver name.
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
468
469
470
471
472
473
474
475
476static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
477{
478 switch (d) {
479 case DMA_BIDIRECTIONAL:
480 return UIS_DMA_BIDIRECTIONAL;
481 case DMA_TO_DEVICE:
482 return UIS_DMA_TO_DEVICE;
483 case DMA_FROM_DEVICE:
484 return UIS_DMA_FROM_DEVICE;
485 case DMA_NONE:
486 return UIS_DMA_NONE;
487 default:
488 return UIS_DMA_NONE;
489 }
490}
491
492
493
494
495
496
497
498
499
500
501
502
/*
 * visorhba_queue_command_lck - queues command to the Service Partition
 * @scsicmd:            Command to be queued
 * @visorhba_cmnd_done: Function to call when command is done
 *
 * Queues to the Service Partition: grabs a free slot in pending[],
 * fills in a uiscmdrsp (destination, CDB, data direction, and the
 * scatter-gather list as guest-physical address/length pairs) and
 * inserts it into the IO channel.  Completion arrives asynchronously
 * through drain_queue().
 *
 * Return: 0 if successfully queued, otherwise SCSI_MLQUEUE_DEVICE_BUSY
 *	   so the midlayer retries later.
 */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
				      (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location; deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far for debugfs */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information: the IO partition addresses
	 * data by guest-physical address/length pairs
	 */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* channel queue is full; release the slot and retry later */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
574
/* When the midlayer provides DEF_SCSI_QCMD, wrap the locked variant with
 * it (handles host-lock acquisition); otherwise call the _lck function
 * directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
580
581
582
583
584
585
586
587
588
589
/*
 * visorhba_slave_alloc - called when new disk is discovered
 * @scsidev: Pointer to the scsi_device
 *
 * Create a new visordisk_info structure and stash it in
 * scsidev->hostdata so completion and error accounting have somewhere
 * to live.  Idempotent: returns early if hostdata is already set.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed.
	 */
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	/* already allocated return success */
	if (scsidev->hostdata)
		return 0;

	/* even though we errored, treat as success */
	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0;

	/* NOTE(review): GFP_ATOMIC looks unnecessary -- slave_alloc runs
	 * in sleepable scan context; confirm before relaxing to GFP_KERNEL.
	 */
	vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
	if (!vdisk)
		return -ENOMEM;

	vdisk->sdev = scsidev;
	scsidev->hostdata = vdisk;
	return 0;
}
616
617
618
619
620
621static void visorhba_slave_destroy(struct scsi_device *scsidev)
622{
623
624
625
626 struct visordisk_info *vdisk;
627
628 vdisk = scsidev->hostdata;
629 scsidev->hostdata = NULL;
630 kfree(vdisk);
631}
632
/* SCSI midlayer host template for the virtual HBA. */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	/* default; probe recomputes sg_tablesize from the channel config */
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
};
649
650
651
652
653
654
655
656
657
658
/*
 * info_debugfs_show - debugfs interface to dump visorhba states
 * @seq: The sequence file to write statistics into
 * @v:   Unused, but needed for use with seq file single_open invocation
 *
 * Presents a text file in debugfs ("<dev>/info") containing adapter
 * statistics gathered at runtime.
 *
 * Return: always 0.
 */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	if (devdata->flags_addr) {
		u64 phys_flags_addr =
			virt_to_phys((__force void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (u64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(info_debugfs);
684
685
686
687
688
689
690
691
692
693
/*
 * complete_taskmgmt_command - complete task management
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @cmdrsp:   Response from the IO partition
 * @result:   The result of the task management command
 *
 * Look up the waiter's waitqueue and result slot via the integer
 * handles (idr_find() simply returns NULL for stale or bogus handles),
 * store the result, and wake up the initiator sleeping in
 * forward_taskmgmt_command().
 */
static void complete_taskmgmt_command(struct idr *idrtable,
				      struct uiscmdrsp *cmdrsp, int result)
{
	wait_queue_head_t *wq =
		idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	int *scsi_result_ptr =
		idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
	if (unlikely(!(wq && scsi_result_ptr))) {
		pr_err("visorhba: no completion context; cmd will time out\n");
		return;
	}

	/* copy the result of the taskmgmt and
	 * wake up the error handler that is waiting for this
	 */
	pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
	*scsi_result_ptr = result;
	wake_up_all(wq);
}
713
714
715
716
717
718
719
720
721
/*
 * visorhba_serverdown_complete - called when the IO Partition has
 *				  completed the serverdown
 * @devdata: Visorhba instance on which to complete serverdown
 *
 * The IO partition has gone away: stop the response-draining thread and
 * fail every outstanding request so the midlayer can recover.  SCSI
 * commands complete with DID_RESET; task-management waiters are woken
 * with TASK_MGMT_FAILED.
 *
 * NOTE(review): scsi_done()/wake_up_all() are invoked here while holding
 * privlock with interrupts disabled -- confirm those callbacks are safe
 * in that context.
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
762
763
764
765
766
767
768
769
770
771
772static int visorhba_serverdown(struct visorhba_devdata *devdata)
773{
774 if (!devdata->serverdown && !devdata->serverchangingstate) {
775 devdata->serverchangingstate = true;
776 visorhba_serverdown_complete(devdata);
777 } else if (devdata->serverchangingstate) {
778 return -EINVAL;
779 }
780 return 0;
781}
782
783
784
785
786
787
788
789
/*
 * do_scsi_linuxstat - scsi command returned linuxstat
 * @cmdrsp:  Response from IOVM
 * @scsicmd: Command issued
 *
 * Copy the sense data back, then update the per-disk error counters --
 * except for the expected "disk not present" inquiry failures seen
 * during scan, which are not errors worth counting.
 */
static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
			      struct scsi_cmnd *scsicmd)
{
	struct visordisk_info *vdisk;
	struct scsi_device *scsidev;

	scsidev = scsicmd->device;
	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);

	/* Do not log errors for disk-not-present inquiries */
	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
	    cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
		return;
	/* okay see what our error_count is here.... */
	vdisk = scsidev->hostdata;
	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
		atomic_inc(&vdisk->error_count);
		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
	}
}
811
/*
 * set_no_disk_inquiry_result - build an INQUIRY response describing a
 *				not-present pseudo device
 * @buf:     Buffer to fill; must hold NO_DISK_INQUIRY_RESULT_LEN bytes
 * @len:     Claimed length of the caller's data buffer
 * @is_lun0: Whether this is LUN 0 -- gets "disk capable, not present"
 *	     instead of "not capable"
 *
 * Return: 0 on success, -EINVAL if @len is too small for a full result.
 */
static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
				      bool is_lun0)
{
	if (len < NO_DISK_INQUIRY_RESULT_LEN)
		return -EINVAL;
	memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
	buf[2] = SCSI_SPC2_VER;		/* ANSI-approved version */
	if (is_lun0) {
		buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
		buf[3] = DEV_HISUPPORT;
	} else {
		buf[0] = DEV_NOT_CAPABLE;
	}
	buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;	/* additional length */
	strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
	return 0;
}
829
830
831
832
833
834
835
836
837static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
838 struct scsi_cmnd *scsicmd)
839{
840 struct scsi_device *scsidev;
841 unsigned char *buf;
842 struct scatterlist *sg;
843 unsigned int i;
844 char *this_page;
845 char *this_page_orig;
846 int bufind = 0;
847 struct visordisk_info *vdisk;
848
849 scsidev = scsicmd->device;
850 if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
851 cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
852 if (cmdrsp->scsi.no_disk_result == 0)
853 return;
854
855 buf = kzalloc(36, GFP_KERNEL);
856 if (!buf)
857 return;
858
859
860
861
862
863
864 set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
865 scsidev->lun == 0);
866
867 if (scsi_sg_count(scsicmd) == 0) {
868 memcpy(scsi_sglist(scsicmd), buf,
869 cmdrsp->scsi.bufflen);
870 kfree(buf);
871 return;
872 }
873
874 scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
875 this_page_orig = kmap_atomic(sg_page(sg));
876 this_page = (void *)((unsigned long)this_page_orig |
877 sg->offset);
878 memcpy(this_page, buf + bufind, sg->length);
879 kunmap_atomic(this_page_orig);
880 }
881 kfree(buf);
882 } else {
883 vdisk = scsidev->hostdata;
884 if (atomic_read(&vdisk->ios_threshold) > 0) {
885 atomic_dec(&vdisk->ios_threshold);
886 if (atomic_read(&vdisk->ios_threshold) == 0)
887 atomic_set(&vdisk->error_count, 0);
888 }
889 }
890}
891
892
893
894
895
896
897
898
899
/*
 * complete_scsi_command - complete a scsi cmd returned from IOVM
 * @cmdrsp:  Response from IOVM
 * @scsicmd: Command issued
 *
 * Copies the response into the scsi command's result, runs the
 * error/success bookkeeping, then hands the command back to the
 * midlayer via scsi_done.
 */
static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
				  struct scsi_cmnd *scsicmd)
{
	/* take what we need out of cmdrsp and complete the scsicmd */
	scsicmd->result = cmdrsp->scsi.linuxstat;
	if (cmdrsp->scsi.linuxstat)
		do_scsi_linuxstat(cmdrsp, scsicmd);
	else
		do_scsi_nolinuxstat(cmdrsp, scsicmd);

	scsicmd->scsi_done(scsicmd);
}
912
913
914
915
916
917
918
919
/*
 * drain_queue - pull responses out of iochannel
 * @cmdrsp:  Scratch buffer each signal is copied into
 * @devdata: Device that owns this iochannel
 *
 * Pulls responses out of the iochannel until it is empty and completes
 * them: SCSI commands via complete_scsi_command(), task management via
 * complete_taskmgmt_command().  NOTIFYGUEST messages are unsupported
 * and logged once.
 *
 * NOTE(review): a response whose handle no longer matches a pending
 * entry breaks out of the loop, deferring any remaining queued
 * responses to the next poll -- confirm that is intended rather than
 * `continue`.
 */
static void drain_queue(struct uiscmdrsp *cmdrsp,
			struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;

	while (1) {
		/* queue empty */
		if (visorchannel_signalremove(devdata->dev->visorchannel,
					      IOCHAN_FROM_IOPART,
					      cmdrsp))
			break;
		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* scsicmd location is returned by the
			 * deletion
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  cmdrsp->scsitaskmgmt.result);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
			dev_err_once(&devdata->dev->device,
				     "ignoring unsupported NOTIFYGUEST\n");
		/* cmdrsp is now available for re-use */
	}
}
953
954
955
956
957
958
959
960
961
962
963
/*
 * process_incoming_rsps - Process responses from IOSP
 * @v: Void pointer to visorhba_devdata
 *
 * Kthread main loop: wait for a wakeup (or the thread_wait_ms poll
 * timeout -- the channel runs in polling mode, see visorhba_probe) and
 * drain any queued responses, until kthread_stop() is called.
 *
 * Return: 0 on success, -ENOMEM on failure to allocate the scratch
 *	   response buffer.
 */
static int process_incoming_rsps(void *v)
{
	struct visorhba_devdata *devdata = v;
	struct uiscmdrsp *cmdrsp = NULL;
	const int size = sizeof(*cmdrsp);

	/* NOTE(review): GFP_ATOMIC looks unnecessary -- this runs in
	 * sleepable kthread context; confirm before relaxing.
	 */
	cmdrsp = kmalloc(size, GFP_ATOMIC);
	if (!cmdrsp)
		return -ENOMEM;

	while (1) {
		if (kthread_should_stop())
			break;
		wait_event_interruptible_timeout(
			devdata->rsp_queue, (atomic_read(
					     &devdata->interrupt_rcvd) == 1),
			msecs_to_jiffies(devdata->thread_wait_ms));

		/* drain queue */
		drain_queue(cmdrsp, devdata);
	}
	kfree(cmdrsp);
	return 0;
}
987
988
989
990
991
992
993
994
995
996
997
998
/*
 * visorhba_pause - function to handle visorbus pause messages
 * @dev:           Device that is pausing
 * @complete_func: Function to call when finished
 *
 * Tears down in-flight state via visorhba_serverdown() and acknowledges
 * the pause through @complete_func.
 *
 * Return: always 0.
 */
static int visorhba_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);

	visorhba_serverdown(devdata);
	complete_func(dev, 0);
	return 0;
}
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019static int visorhba_resume(struct visor_device *dev,
1020 visorbus_state_complete_func complete_func)
1021{
1022 struct visorhba_devdata *devdata;
1023
1024 devdata = dev_get_drvdata(&dev->device);
1025 if (!devdata)
1026 return -EINVAL;
1027
1028 if (devdata->serverdown && !devdata->serverchangingstate)
1029 devdata->serverchangingstate = true;
1030
1031 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1032 "vhba_incming");
1033 devdata->serverdown = false;
1034 devdata->serverchangingstate = false;
1035
1036 return 0;
1037}
1038
1039
1040
1041
1042
1043
1044
1045
1046
/*
 * visorhba_probe - device has been discovered; do acquire/init
 * @dev: visor_device that was discovered
 *
 * Allocates a Scsi_Host, sizes it from the channel's vhba_config_max,
 * registers with the midlayer, creates debugfs entries, switches the
 * channel to polling mode, starts the response thread and kicks off a
 * bus scan.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int err, channel_offset;
	u64 features;

	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	channel_offset = offsetof(struct visor_io_channel, vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned int)max.max_id;
	scsihost->max_lun = (unsigned int)max.max_lun;
	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
	/* max_io_size is in bytes; max_sectors wants 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
						  visorhba_debugfs_dir);
	if (!devdata->debugfs_dir) {
		err = -ENOMEM;
		goto err_scsi_remove_host;
	}
	devdata->debugfs_info =
		debugfs_create_file("info", 0440,
				    devdata->debugfs_dir, devdata,
				    &info_debugfs_fops);
	if (!devdata->debugfs_info) {
		err = -ENOMEM;
		goto err_debugfs_dir;
	}

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* advertise that we only poll the channel (no interrupts) */
	channel_offset = offsetof(struct visor_io_channel,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;
	features |= VISOR_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_debugfs_info;

	idr_init(&devdata->idr);

	devdata->thread_wait_ms = 2;
	/* NOTE(review): result not checked; a NULL thread here would
	 * oops later in visor_thread_stop() -- confirm.
	 */
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_debugfs_info:
	debugfs_remove(devdata->debugfs_info);

err_debugfs_dir:
	debugfs_remove_recursive(devdata->debugfs_dir);

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1137
1138
1139
1140
1141
1142
1143
/*
 * visorhba_remove - remove a visorhba device
 * @dev: Device to remove
 *
 * Undoes visorhba_probe(): stops the response thread, unregisters and
 * releases the Scsi_Host, and destroys the idr and debugfs entries.
 *
 * NOTE(review): devdata lives inside the Scsi_Host's hostdata, so the
 * idr/debugfs teardown below runs after scsi_host_put() may have
 * dropped the last host reference -- confirm this cannot touch freed
 * memory.
 */
static void visorhba_remove(struct visor_device *dev)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
	struct Scsi_Host *scsihost = NULL;

	if (!devdata)
		return;

	scsihost = devdata->scsihost;
	visor_thread_stop(devdata->thread);
	scsi_remove_host(scsihost);
	scsi_host_put(scsihost);

	idr_destroy(&devdata->idr);

	dev_set_drvdata(&dev->device, NULL);
	debugfs_remove(devdata->debugfs_info);
	debugfs_remove_recursive(devdata->debugfs_dir);
}
1163
1164
1165
1166
1167
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we
 * support is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	.channel_interrupt = NULL,	/* polling mode; no interrupt handler */
};
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187static int visorhba_init(void)
1188{
1189 int rc;
1190
1191 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1192 if (!visorhba_debugfs_dir)
1193 return -ENOMEM;
1194
1195 rc = visorbus_register_visor_driver(&visorhba_driver);
1196 if (rc)
1197 goto cleanup_debugfs;
1198
1199 return 0;
1200
1201cleanup_debugfs:
1202 debugfs_remove_recursive(visorhba_debugfs_dir);
1203
1204 return rc;
1205}
1206
1207
1208
1209
1210
1211
/*
 * visorhba_exit - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1217
1218module_init(visorhba_init);
1219module_exit(visorhba_exit);
1220
1221MODULE_AUTHOR("Unisys");
1222MODULE_LICENSE("GPL");
1223MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
1224