1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/debugfs.h>
18#include <linux/kthread.h>
19#include <linux/idr.h>
20#include <linux/module.h>
21#include <linux/seq_file.h>
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26
27#include "visorbus.h"
28#include "iochannel.h"
29
30
31
/* I/O throttle applied to a disk after repeated errors */
#define IOS_ERROR_THRESHOLD 1000
/* Size of the fixed table of requests outstanding to the IO partition */
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
/* Cap on the per-disk error counter */
#define VISORHBA_ERROR_COUNT 30

/* Root of this driver's debugfs tree; per-device dirs are created under it */
static struct dentry *visorhba_debugfs_dir;
37
38
/*
 * Channel GUIDs this driver supports; visorbus uses this table to match
 * discovered devices to this driver.  The empty entry terminates the list.
 */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* The only channel type we expect from the bus driver is the
	 * VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
	  VISOR_VHBA_CHANNEL_VERSIONID },
	{}
};

MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
50
/* Per-LUN bookkeeping, attached to scsi_device->hostdata in slave_alloc. */
struct visordisk_info {
	struct scsi_device *sdev;
	u32 valid;
	/* Countdown of I/Os allowed through while errors settle */
	atomic_t ios_threshold;
	/* Recent-error count; capped at VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	/* NOTE(review): presumably links into devdata->head — no code in
	 * this chunk populates it; confirm before relying on it.
	 */
	struct visordisk_info *next;
};
58
/* One slot in the table of requests outstanding to the IO partition. */
struct scsipending {
	/* command/response packet exchanged over the channel */
	struct uiscmdrsp cmdrsp;
	/* The Data being tracked; non-NULL marks the slot as in use */
	void *sent;
	/* Type of pointer that is being stored (CMD_*_TYPE) */
	char cmdtype;
};
66
67
/* Per-device driver state; lives in Scsi_Host->hostdata. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to the IOVM and
	 * haven't returned yet; slots are claimed in add_scsipending_entry()
	 * and released in del_scsipending_ent().
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	/* Protects pending[] and nextinsert (also passed to simple_idr_get) */
	spinlock_t privlock;
	bool serverdown;
	bool serverchangingstate;
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;
	/* Largest SCSI buffer length seen so far (exported via debugfs) */
	unsigned int max_buff_len;
	int devnum;
	/* process_incoming_rsps() worker thread */
	struct task_struct *thread;
	/* Poll interval (ms) used by the worker thread */
	int thread_wait_ms;

	/*
	 * Allows us to pass int handles back and forth between us and the
	 * IO partition, instead of raw pointers.
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
104
/* Wrapper for a devdata pointer.  NOTE(review): not referenced anywhere in
 * this chunk — confirm it is used elsewhere or remove.
 */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
108
109
110
111
112
113
114
115
116
117
118
119
120static struct task_struct *visor_thread_start(int (*threadfn)(void *),
121 void *thrcontext, char *name)
122{
123 struct task_struct *task;
124
125 task = kthread_run(threadfn, thrcontext, "%s", name);
126 if (IS_ERR(task)) {
127 pr_err("visorbus failed to start thread\n");
128 return NULL;
129 }
130 return task;
131}
132
133
134
135
136
/*
 * visor_thread_stop - Stop the thread started by visor_thread_start()
 * @task: Thread to stop; kthread_stop() blocks until the thread exits.
 */
static void visor_thread_stop(struct task_struct *task)
{
	kthread_stop(task);
}
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
/*
 * add_scsipending_entry - Save off an I/O command that is pending to the
 *			   Service Partition
 * @devdata: Pointer to devdata
 * @cmdtype: The type of command pending (CMD_*_TYPE)
 * @new:     The command to track; NULL means the entry refers only to its
 *	     own embedded cmdrsp (used for task management)
 *
 * Claims the next free slot in the pending table, scanning circularly
 * from nextinsert under privlock.
 *
 * Return: slot index where the entry was added, or -EBUSY when all
 *	   MAX_PENDING_REQUESTS slots are occupied
 */
static int add_scsipending_entry(struct visorhba_devdata *devdata,
				 char cmdtype, void *new)
{
	unsigned long flags;
	struct scsipending *entry;
	int insert_location;

	spin_lock_irqsave(&devdata->privlock, flags);
	insert_location = devdata->nextinsert;
	/* Walk the ring until a free slot (sent == NULL) is found; a full
	 * wrap back to nextinsert means the table is full.
	 */
	while (devdata->pending[insert_location].sent) {
		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
		if (insert_location == (int)devdata->nextinsert) {
			spin_unlock_irqrestore(&devdata->privlock, flags);
			return -EBUSY;
		}
	}

	entry = &devdata->pending[insert_location];
	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
	entry->cmdtype = cmdtype;
	if (new)
		entry->sent = new;
	/* wants to send cmdrsp */
	else
		entry->sent = &entry->cmdrsp;
	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
	spin_unlock_irqrestore(&devdata->privlock, flags);

	return insert_location;
}
186
187
188
189
190
191
192
193
194
195
196static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
197{
198 unsigned long flags;
199 void *sent;
200
201 if (del >= MAX_PENDING_REQUESTS)
202 return NULL;
203
204 spin_lock_irqsave(&devdata->privlock, flags);
205 sent = devdata->pending[del].sent;
206 devdata->pending[del].cmdtype = 0;
207 devdata->pending[del].sent = NULL;
208 spin_unlock_irqrestore(&devdata->privlock, flags);
209
210 return sent;
211}
212
213
214
215
216
217
218
219
220
221
222
223static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
224 int ent)
225{
226 if (ddata->pending[ent].sent)
227 return &ddata->pending[ent].cmdrsp;
228
229 return NULL;
230}
231
232
233
234
235
236
237
238
239
240
241
242
243static unsigned int simple_idr_get(struct idr *idrtable, void *p,
244 spinlock_t *lock)
245{
246 int id;
247 unsigned long flags;
248
249 idr_preload(GFP_KERNEL);
250 spin_lock_irqsave(lock, flags);
251 id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
252 spin_unlock_irqrestore(lock, flags);
253 idr_preload_end();
254
255 if (id < 0)
256 return 0;
257
258 return (unsigned int)(id);
259}
260
261
262
263
264
265
266
267
268
269
270
271
/*
 * setup_scsitaskmgmt_handles - Stash int handles in the cmdrsp packet so
 *				completion processing can find the waiter
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @lock:     The spinlock protecting the idr
 * @cmdrsp:   Command packet being prepared
 * @event:    Wait queue the initiator blocks on
 * @result:   Slot the completion result is written into
 *
 * A handle of 0 indicates simple_idr_get() failed; see
 * cleanup_scsitaskmgmt_handles().
 */
static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
				       struct uiscmdrsp *cmdrsp,
				       wait_queue_head_t *event, int *result)
{
	/* specify the event that has to be triggered when this cmd is
	 * complete
	 */
	cmdrsp->scsitaskmgmt.notify_handle =
		simple_idr_get(idrtable, event, lock);
	cmdrsp->scsitaskmgmt.notifyresult_handle =
		simple_idr_get(idrtable, result, lock);
}
283
284
285
286
287
288
289
290static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
291 struct uiscmdrsp *cmdrsp)
292{
293 if (cmdrsp->scsitaskmgmt.notify_handle)
294 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
295 if (cmdrsp->scsitaskmgmt.notifyresult_handle)
296 idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
297}
298
299
300
301
302
303
304
305
306
307
308
309
/*
 * forward_taskmgmt_command - Send a task management command to the Service
 *			      Partition
 * @tasktype: Type of taskmgmt command
 * @scsidev:  The scsi_device the command targets
 *
 * Create a cmdrsp packet, queue it to the IO partition, and wait up to
 * 45 seconds for the response thread to post a result.
 *
 * Return: SUCCESS, or FAILED if the server is down, the pending table is
 *	   full, the signal could not be inserted, or the wait timed out
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	/* sentinel meaning "no result posted yet" */
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	/* NOTE(review): the wait queue and result live on this stack frame
	 * and are published via idr handles; the cleanup below must run
	 * before return so the response thread never sees stale pointers —
	 * worth confirming against late/duplicate responses.
	 */
	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
371
372
373
374
375
376
377
378static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
379{
380
381 struct scsi_device *scsidev;
382 struct visordisk_info *vdisk;
383 int rtn;
384
385 scsidev = scsicmd->device;
386 vdisk = scsidev->hostdata;
387 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
388 atomic_inc(&vdisk->error_count);
389 else
390 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
391 rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
392 if (rtn == SUCCESS) {
393 scsicmd->result = DID_ABORT << 16;
394 scsicmd->scsi_done(scsicmd);
395 }
396 return rtn;
397}
398
399
400
401
402
403
404
405static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
406{
407
408 struct scsi_device *scsidev;
409 struct visordisk_info *vdisk;
410 int rtn;
411
412 scsidev = scsicmd->device;
413 vdisk = scsidev->hostdata;
414 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
415 atomic_inc(&vdisk->error_count);
416 else
417 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
418 rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
419 if (rtn == SUCCESS) {
420 scsicmd->result = DID_RESET << 16;
421 scsicmd->scsi_done(scsicmd);
422 }
423 return rtn;
424}
425
426
427
428
429
430
431
432
433static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
434{
435 struct scsi_device *scsidev;
436 struct visordisk_info *vdisk;
437 int rtn;
438
439 scsidev = scsicmd->device;
440 shost_for_each_device(scsidev, scsidev->host) {
441 vdisk = scsidev->hostdata;
442 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
443 atomic_inc(&vdisk->error_count);
444 else
445 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
446 }
447 rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
448 if (rtn == SUCCESS) {
449 scsicmd->result = DID_RESET << 16;
450 scsicmd->scsi_done(scsicmd);
451 }
452 return rtn;
453}
454
455
456
457
458
459
460
/*
 * visorhba_host_reset_handler - Host reset entry point
 * @scsicmd: The scsi command that requested the reset
 *
 * NOTE(review): stub — no reset is actually forwarded to the IO partition;
 * confirm reporting SUCCESS unconditionally is intended.
 *
 * Return: SUCCESS
 */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* not implemented: nothing is sent to the IO partition */
	return SUCCESS;
}
466
467
468
469
470
471
472
/*
 * visorhba_get_info - Get information about the SCSI host
 * @shp: Host requesting the information (unused)
 *
 * Return: Static driver-name string
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* host information is static for this driver */
	return "visorhba";
}
478
479
480
481
482
483
484
485
486static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
487{
488 switch (d) {
489 case DMA_BIDIRECTIONAL:
490 return UIS_DMA_BIDIRECTIONAL;
491 case DMA_TO_DEVICE:
492 return UIS_DMA_TO_DEVICE;
493 case DMA_FROM_DEVICE:
494 return UIS_DMA_FROM_DEVICE;
495 case DMA_NONE:
496 return UIS_DMA_NONE;
497 default:
498 return UIS_DMA_NONE;
499 }
500}
501
502
503
504
505
506
507
508
509
510
511
512
/*
 * visorhba_queue_command_lck - Queue a SCSI command to the Service Partition
 * @scsicmd:            The command the midlayer wants to send
 * @visorhba_cmnd_done: Completion callback to invoke when the command is done
 *
 * Tracks the command in the pending table, translates it into a cmdrsp
 * packet (destination, direction, CDB, scatter/gather list as physical
 * addresses), and inserts it into the channel toward the IO partition.
 *
 * Return: 0 if successfully queued, otherwise SCSI_MLQUEUE_DEVICE_BUSY
 */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
				      (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location; deleting from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save the done function that we call when the cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save data direction */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	/* NOTE(review): copies a fixed MAX_CMND_SIZE regardless of the
	 * actual CDB length — confirm scsicmd->cmnd is at least that big.
	 */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far (debugfs stat) */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert the scatterlist into guest-physical address/length pairs */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* channel queue is full; don't wait, report busy */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
584
/* Wrap the locked queuecommand with the midlayer's host-lock helper when
 * DEF_SCSI_QCMD is available; older kernels call the _lck variant directly.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
590
591
592
593
594
595
596
597
598
599
600static int visorhba_slave_alloc(struct scsi_device *scsidev)
601{
602
603
604
605 struct visordisk_info *vdisk;
606 struct visorhba_devdata *devdata;
607 struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
608
609
610 if (scsidev->hostdata)
611 return 0;
612
613
614 devdata = (struct visorhba_devdata *)scsihost->hostdata;
615 if (!devdata)
616 return 0;
617
618 vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
619 if (!vdisk)
620 return -ENOMEM;
621
622 vdisk->sdev = scsidev;
623 scsidev->hostdata = vdisk;
624 return 0;
625}
626
627
628
629
630
631static void visorhba_slave_destroy(struct scsi_device *scsidev)
632{
633
634
635
636 struct visordisk_info *vdisk;
637
638 vdisk = scsidev->hostdata;
639 scsidev->hostdata = NULL;
640 kfree(vdisk);
641}
642
/* SCSI midlayer operations and limits for this virtual HBA. */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
660
661
662
663
664
665
666
667
668
669
670static int info_debugfs_show(struct seq_file *seq, void *v)
671{
672 struct visorhba_devdata *devdata = seq->private;
673
674 seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
675 seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
676 seq_printf(seq, "interrupts_disabled = %llu\n",
677 devdata->interrupts_disabled);
678 seq_printf(seq, "interrupts_notme = %llu\n",
679 devdata->interrupts_notme);
680 seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
681 if (devdata->flags_addr) {
682 u64 phys_flags_addr =
683 virt_to_phys((__force void *)devdata->flags_addr);
684 seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
685 phys_flags_addr);
686 seq_printf(seq, "FeatureFlags = %llu\n",
687 (u64)readq(devdata->flags_addr));
688 }
689 seq_printf(seq, "acquire_failed_cnt = %llu\n",
690 devdata->acquire_failed_cnt);
691
692 return 0;
693}
694
/* Open handler for the "info" debugfs file; inode->i_private carries the
 * devdata pointer passed to debugfs_create_file().
 */
static int info_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_debugfs_show, inode->i_private);
}
699
/* File operations for the "info" debugfs file (standard seq_file wiring). */
static const struct file_operations info_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = info_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
707
708
709
710
711
712
713
714
715
716
/*
 * complete_taskmgmt_command - Complete a task management command
 * @idrtable: The data object maintaining the pointer<-->int mappings
 * @cmdrsp:   Response from the IO partition
 * @result:   The result of the task management command
 *
 * Looks up the waiter's wait queue and result slot via the idr handles
 * carried in the response, posts the result, and wakes the waiter.
 */
static void complete_taskmgmt_command(struct idr *idrtable,
				      struct uiscmdrsp *cmdrsp, int result)
{
	wait_queue_head_t *wq =
		idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	int *scsi_result_ptr =
		idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
	/* Either handle missing means the initiator is gone or setup
	 * failed; nothing to notify, the waiter will time out.
	 */
	if (unlikely(!(wq && scsi_result_ptr))) {
		pr_err("visorhba: no completion context; cmd will time out\n");
		return;
	}

	/* copy the result of the taskmgmt and
	 * wake up the error handler that is waiting for this
	 */
	pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
	*scsi_result_ptr = result;
	wake_up_all(wq);
}
736
737
738
739
740
741
742
743
744
/*
 * visorhba_serverdown_complete - The IO partition has gone away
 * @devdata: Device the IO partition was managing
 *
 * Stops the response thread, then fails every still-pending request so
 * the midlayer can recover: SCSI commands complete with DID_RESET, and
 * task management waiters get TASK_MGMT_FAILED.
 */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail outstanding IO requests under the lock so no new
	 * completions race with the teardown.
	 */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
785
786
787
788
789
790
791
792
793
794
795static int visorhba_serverdown(struct visorhba_devdata *devdata)
796{
797 if (!devdata->serverdown && !devdata->serverchangingstate) {
798 devdata->serverchangingstate = true;
799 visorhba_serverdown_complete(devdata);
800 } else if (devdata->serverchangingstate) {
801 return -EINVAL;
802 }
803 return 0;
804}
805
806
807
808
809
810
811
812
813static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
814 struct scsi_cmnd *scsicmd)
815{
816 struct visordisk_info *vdisk;
817 struct scsi_device *scsidev;
818
819 scsidev = scsicmd->device;
820 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
821
822
823 if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
824 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
825 cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
826 return;
827
828 vdisk = scsidev->hostdata;
829 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
830 atomic_inc(&vdisk->error_count);
831 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
832 }
833}
834
/*
 * set_no_disk_inquiry_result - Fill @buf with a fabricated INQUIRY result
 *				for a pseudo (not-present) device
 * @buf:     Buffer to fill (must hold NO_DISK_INQUIRY_RESULT_LEN bytes)
 * @len:     Usable length of @buf
 * @is_lun0: True when the target is LUN 0 (gets "capable, not present")
 *
 * Return: 0 on success, -EINVAL when @len is too small
 */
static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
				      bool is_lun0)
{
	if (len < NO_DISK_INQUIRY_RESULT_LEN)
		return -EINVAL;
	memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
	buf[2] = SCSI_SPC2_VER;	/* claimed SPC version */
	if (is_lun0) {
		buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
		buf[3] = DEV_HISUPPORT;
	} else {
		buf[0] = DEV_NOT_CAPABLE;
	}
	buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;	/* additional length */
	/* vendor/product text; strncpy deliberately NUL-pads the fixed field */
	strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
	return 0;
}
852
853
854
855
856
857
858
859
/*
 * do_scsi_nolinuxstat - SCSI command completed without error
 * @cmdrsp:  Response from the IO partition
 * @scsicmd: Command that completed
 *
 * For an INQUIRY that the IO partition flagged as "no disk", fabricate a
 * pseudo-device inquiry result and copy it into the command's buffer(s);
 * otherwise decay the per-disk error throttle.
 */
static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
				struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char *buf;
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	int bufind = 0;
	struct visordisk_info *vdisk;

	scsidev = scsicmd->device;
	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
	    cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
		if (!buf)
			return;

		/* Linux SCSI code wants a device at LUN 0 to issue
		 * REPORT LUNS, but we don't want a disk there, so
		 * present a processor there instead.
		 */
		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
					   scsidev->lun == 0);

		if (scsi_sg_count(scsicmd) == 0) {
			/* NOTE(review): with no sg entries this treats the
			 * sglist pointer as a flat buffer and copies
			 * bufflen bytes from the 36-byte buf — confirm
			 * bufflen is bounded here.
			 */
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			kfree(buf);
			return;
		}

		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			/* NOTE(review): bufind is never advanced, so every
			 * sg entry copies from offset 0, and sg[i].length
			 * may exceed the 36-byte buf; also sg[i] indexing
			 * assumes an unchained sglist.  Consider
			 * scsi_sg_copy_from_buffer() — verify intent.
			 */
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
		kfree(buf);
	} else {
		/* successful I/O: decay the throttle; when it reaches zero,
		 * clear the error counter too
		 */
		vdisk = scsidev->hostdata;
		if (atomic_read(&vdisk->ios_threshold) > 0) {
			atomic_dec(&vdisk->ios_threshold);
			if (atomic_read(&vdisk->ios_threshold) == 0)
				atomic_set(&vdisk->error_count, 0);
		}
	}
}
915
916
917
918
919
920
921
922
923
924static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
925 struct scsi_cmnd *scsicmd)
926{
927
928 scsicmd->result = cmdrsp->scsi.linuxstat;
929 if (cmdrsp->scsi.linuxstat)
930 do_scsi_linuxstat(cmdrsp, scsicmd);
931 else
932 do_scsi_nolinuxstat(cmdrsp, scsicmd);
933
934 scsicmd->scsi_done(scsicmd);
935}
936
937
938
939
940
941
942
943
/*
 * drain_queue - Pull responses out of the IO channel
 * @cmdrsp:  Scratch buffer each response is copied into
 * @devdata: Device owning the channel
 *
 * Repeatedly removes signals from the IOPART response queue and
 * completes the matching pending entry until the queue is empty.
 */
static void drain_queue(struct uiscmdrsp *cmdrsp,
			struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;

	while (1) {
		/* queue empty */
		if (visorchannel_signalremove(devdata->dev->visorchannel,
					      IOCHAN_FROM_IOPART,
					      cmdrsp))
			break;
		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* deletion returns the scsicmd that was stored at
			 * the handle's slot
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  cmdrsp->scsitaskmgmt.result);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
			dev_err_once(&devdata->dev->device,
				     "ignoring unsupported NOTIFYGUEST\n");
		/* cmdrsp is now available for re-use */
	}
}
977
978
979
980
981
982
983
984
985
986
987
988static int process_incoming_rsps(void *v)
989{
990 struct visorhba_devdata *devdata = v;
991 struct uiscmdrsp *cmdrsp = NULL;
992 const int size = sizeof(*cmdrsp);
993
994 cmdrsp = kmalloc(size, GFP_ATOMIC);
995 if (!cmdrsp)
996 return -ENOMEM;
997
998 while (1) {
999 if (kthread_should_stop())
1000 break;
1001 wait_event_interruptible_timeout(
1002 devdata->rsp_queue, (atomic_read(
1003 &devdata->interrupt_rcvd) == 1),
1004 msecs_to_jiffies(devdata->thread_wait_ms));
1005
1006 drain_queue(cmdrsp, devdata);
1007 }
1008 kfree(cmdrsp);
1009 return 0;
1010}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/*
 * visorhba_pause - Function to handle visorbus pause messages
 * @dev:           Device that is pausing
 * @complete_func: Function to call when finished
 *
 * Tears down in-flight traffic toward the IO partition and acknowledges
 * the pause.
 *
 * Return: 0 always
 */
static int visorhba_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);

	visorhba_serverdown(devdata);
	complete_func(dev, 0);
	return 0;
}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043static int visorhba_resume(struct visor_device *dev,
1044 visorbus_state_complete_func complete_func)
1045{
1046 struct visorhba_devdata *devdata;
1047
1048 devdata = dev_get_drvdata(&dev->device);
1049 if (!devdata)
1050 return -EINVAL;
1051
1052 if (devdata->serverdown && !devdata->serverchangingstate)
1053 devdata->serverchangingstate = true;
1054
1055 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1056 "vhba_incming");
1057 devdata->serverdown = false;
1058 devdata->serverchangingstate = false;
1059
1060 return 0;
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071static int visorhba_probe(struct visor_device *dev)
1072{
1073 struct Scsi_Host *scsihost;
1074 struct vhba_config_max max;
1075 struct visorhba_devdata *devdata = NULL;
1076 int err, channel_offset;
1077 u64 features;
1078
1079 scsihost = scsi_host_alloc(&visorhba_driver_template,
1080 sizeof(*devdata));
1081 if (!scsihost)
1082 return -ENODEV;
1083
1084 channel_offset = offsetof(struct visor_io_channel, vhba.max);
1085 err = visorbus_read_channel(dev, channel_offset, &max,
1086 sizeof(struct vhba_config_max));
1087 if (err < 0)
1088 goto err_scsi_host_put;
1089
1090 scsihost->max_id = (unsigned int)max.max_id;
1091 scsihost->max_lun = (unsigned int)max.max_lun;
1092 scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1093 scsihost->max_sectors =
1094 (unsigned short)(max.max_io_size >> 9);
1095 scsihost->sg_tablesize =
1096 (unsigned short)(max.max_io_size / PAGE_SIZE);
1097 if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1098 scsihost->sg_tablesize = MAX_PHYS_INFO;
1099 err = scsi_add_host(scsihost, &dev->device);
1100 if (err < 0)
1101 goto err_scsi_host_put;
1102
1103 devdata = (struct visorhba_devdata *)scsihost->hostdata;
1104 devdata->dev = dev;
1105 dev_set_drvdata(&dev->device, devdata);
1106
1107 devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1108 visorhba_debugfs_dir);
1109 if (!devdata->debugfs_dir) {
1110 err = -ENOMEM;
1111 goto err_scsi_remove_host;
1112 }
1113 devdata->debugfs_info =
1114 debugfs_create_file("info", 0440,
1115 devdata->debugfs_dir, devdata,
1116 &info_debugfs_fops);
1117 if (!devdata->debugfs_info) {
1118 err = -ENOMEM;
1119 goto err_debugfs_dir;
1120 }
1121
1122 init_waitqueue_head(&devdata->rsp_queue);
1123 spin_lock_init(&devdata->privlock);
1124 devdata->serverdown = false;
1125 devdata->serverchangingstate = false;
1126 devdata->scsihost = scsihost;
1127
1128 channel_offset = offsetof(struct visor_io_channel,
1129 channel_header.features);
1130 err = visorbus_read_channel(dev, channel_offset, &features, 8);
1131 if (err)
1132 goto err_debugfs_info;
1133 features |= VISOR_CHANNEL_IS_POLLING;
1134 err = visorbus_write_channel(dev, channel_offset, &features, 8);
1135 if (err)
1136 goto err_debugfs_info;
1137
1138 idr_init(&devdata->idr);
1139
1140 devdata->thread_wait_ms = 2;
1141 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1142 "vhba_incoming");
1143
1144 scsi_scan_host(scsihost);
1145
1146 return 0;
1147
1148err_debugfs_info:
1149 debugfs_remove(devdata->debugfs_info);
1150
1151err_debugfs_dir:
1152 debugfs_remove_recursive(devdata->debugfs_dir);
1153
1154err_scsi_remove_host:
1155 scsi_remove_host(scsihost);
1156
1157err_scsi_host_put:
1158 scsi_host_put(scsihost);
1159 return err;
1160}
1161
1162
1163
1164
1165
1166
1167
/*
 * visorhba_remove - Device has been removed; clean up
 * @dev: The visor_device being removed
 *
 * Stops the response thread, unregisters and releases the scsi host,
 * and tears down the idr and debugfs entries.
 */
static void visorhba_remove(struct visor_device *dev)
{
	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
	struct Scsi_Host *scsihost = NULL;

	if (!devdata)
		return;

	scsihost = devdata->scsihost;
	visor_thread_stop(devdata->thread);
	scsi_remove_host(scsihost);
	scsi_host_put(scsihost);

	idr_destroy(&devdata->idr);

	dev_set_drvdata(&dev->device, NULL);
	debugfs_remove(devdata->debugfs_info);
	debugfs_remove_recursive(devdata->debugfs_dir);
}
1187
1188
1189
1190
1191
/* Tells the visorbus driver which channel types we support and which
 * functions to call when a matching device is attached, removed, paused,
 * or resumed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* polling mode: responses are drained by a kthread, not an IRQ */
	.channel_interrupt = NULL,
};
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211static int visorhba_init(void)
1212{
1213 int rc = -ENOMEM;
1214
1215 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1216 if (!visorhba_debugfs_dir)
1217 return -ENOMEM;
1218
1219 rc = visorbus_register_visor_driver(&visorhba_driver);
1220 if (rc)
1221 goto cleanup_debugfs;
1222
1223 return 0;
1224
1225cleanup_debugfs:
1226 debugfs_remove_recursive(visorhba_debugfs_dir);
1227
1228 return rc;
1229}
1230
1231
1232
1233
1234
1235
/*
 * visorhba_exit - Driver exit routine
 *
 * Unregisters from visorbus first (so no new devices arrive), then
 * removes the debugfs tree.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1241
/* Standard module entry/exit wiring and metadata. */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
1248