// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

11#include <linux/kernel.h>
12#include <linux/wait.h>
13#include <linux/sched.h>
14#include <linux/completion.h>
15#include <linux/string.h>
16#include <linux/mm.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/hyperv.h>
23#include <linux/blkdev.h>
24#include <linux/dma-mapping.h>
25
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_devinfo.h>
33#include <scsi/scsi_dbg.h>
34#include <scsi/scsi_transport_fc.h>
35#include <scsi/scsi_transport.h>
36

/*
 * All wire protocol details (the storage protocol spoken between the
 * guest and the Hyper-V host) are consolidated below.
 *
 * The protocol version is negotiated at channel-init time, newest version
 * first, using the VMSTOR_PROTO_VERSION_* values defined here.  The
 * (major, minor) pairs correspond to the Windows releases named in the
 * macros: Win6 = 2.0, Win7 = 4.2, Win8 = 5.1, Win8.1 = 6.0, Win10 = 6.2.
 */

55#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
56 (((MINOR_) & 0xff)))
57
58#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
59#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
60#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
61#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
62#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
63
64
65enum vstor_packet_operation {
66 VSTOR_OPERATION_COMPLETE_IO = 1,
67 VSTOR_OPERATION_REMOVE_DEVICE = 2,
68 VSTOR_OPERATION_EXECUTE_SRB = 3,
69 VSTOR_OPERATION_RESET_LUN = 4,
70 VSTOR_OPERATION_RESET_ADAPTER = 5,
71 VSTOR_OPERATION_RESET_BUS = 6,
72 VSTOR_OPERATION_BEGIN_INITIALIZATION = 7,
73 VSTOR_OPERATION_END_INITIALIZATION = 8,
74 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
75 VSTOR_OPERATION_QUERY_PROPERTIES = 10,
76 VSTOR_OPERATION_ENUMERATE_BUS = 11,
77 VSTOR_OPERATION_FCHBA_DATA = 12,
78 VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13,
79 VSTOR_OPERATION_MAXIMUM = 13
80};
81
82
83
84
85
86struct hv_fc_wwn_packet {
87 u8 primary_active;
88 u8 reserved1[3];
89 u8 primary_port_wwn[8];
90 u8 primary_node_wwn[8];
91 u8 secondary_port_wwn[8];
92 u8 secondary_node_wwn[8];
93};
94
95
96
97
98
99
100
101#define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002
102#define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004
103#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008
104#define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010
105#define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020
106#define SRB_FLAGS_DATA_IN 0x00000040
107#define SRB_FLAGS_DATA_OUT 0x00000080
108#define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000
109#define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
110#define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100
111#define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200
112#define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400
113
114
115
116
117#define SRB_FLAGS_D3_PROCESSING 0x00000800
118#define SRB_FLAGS_IS_ACTIVE 0x00010000
119#define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000
120#define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000
121#define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000
122#define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000
123#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000
124#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000
125#define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000
126#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
127#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
128
129#define SP_UNTAGGED ((unsigned char) ~0)
130#define SRB_SIMPLE_TAG_REQUEST 0x20
131
132
133
134
135
136
137#define STORVSC_MAX_CMD_LEN 0x10
138
139#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
140#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
141
142#define STORVSC_SENSE_BUFFER_SIZE 0x14
143#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14

/*
 * The sense buffer size changed in Win8 (0x12 -> 0x14 bytes).  Track the
 * size to use in a run-time variable; it is updated during protocol
 * negotiation, and starting with the pre-Win8 value is safe.
 */
151static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;

/*
 * The storage protocol version is determined during the initial exchange
 * with the host.  It indicates which storage functionality is available
 * in the host.
 */
158static int vmstor_proto_version;
159
160#define STORVSC_LOGGING_NONE 0
161#define STORVSC_LOGGING_ERROR 1
162#define STORVSC_LOGGING_WARN 2
163
164static int logging_level = STORVSC_LOGGING_ERROR;
165module_param(logging_level, int, S_IRUGO|S_IWUSR);
166MODULE_PARM_DESC(logging_level,
167 "Logging level, 0 - None, 1 - Error (default), 2 - Warning.");
168
169static inline bool do_logging(int level)
170{
171 return logging_level >= level;
172}
173
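/*
 * storvsc_log() - emit a message via dev_warn() when the current
 * logging_level is at least 'level'.  A typical (hypothetical) call site:
 *
 *	storvsc_log(device, STORVSC_LOGGING_WARN,
 *		    "sub-channel open failed: err=%d\n", ret);
 */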
174#define storvsc_log(dev, level, fmt, ...) \
175do { \
176 if (do_logging(level)) \
177 dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
178} while (0)
179
180struct vmscsi_win8_extension {
181
182
183
184 u16 reserve;
185 u8 queue_tag;
186 u8 queue_action;
187 u32 srb_flags;
188 u32 time_out_value;
189 u32 queue_sort_ey;
190} __packed;
191
192struct vmscsi_request {
193 u16 length;
194 u8 srb_status;
195 u8 scsi_status;
196
197 u8 port_number;
198 u8 path_id;
199 u8 target_id;
200 u8 lun;
201
202 u8 cdb_length;
203 u8 sense_info_length;
204 u8 data_in;
205 u8 reserved;
206
207 u32 data_transfer_length;
208
209 union {
210 u8 cdb[STORVSC_MAX_CMD_LEN];
211 u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
212 u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
213 };
214
215
216
217 struct vmscsi_win8_extension win8_extension;
218
} __packed;
220
221
222
223
224struct vmstor_protocol {
225 int protocol_version;
226 int sense_buffer_size;
227 int vmscsi_size_delta;
228};
229
230
231static const struct vmstor_protocol vmstor_protocols[] = {
232 {
233 VMSTOR_PROTO_VERSION_WIN10,
234 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
235 0
236 },
237 {
238 VMSTOR_PROTO_VERSION_WIN8_1,
239 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
240 0
241 },
242 {
243 VMSTOR_PROTO_VERSION_WIN8,
244 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
245 0
246 },
247 {
248 VMSTOR_PROTO_VERSION_WIN7,
249 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
250 sizeof(struct vmscsi_win8_extension),
251 },
252 {
253 VMSTOR_PROTO_VERSION_WIN6,
254 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
255 sizeof(struct vmscsi_win8_extension),
256 }
257};
258
259
260
261
262
263
264
265#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1
266
267struct vmstorage_channel_properties {
268 u32 reserved;
269 u16 max_channel_cnt;
270 u16 reserved1;
271
272 u32 flags;
273 u32 max_transfer_bytes;
274
275 u64 reserved2;
276} __packed;
277
278
279struct vmstorage_protocol_version {
280
281 u16 major_minor;
282
283
284
285
286
287
288
289
290 u16 revision;
291} __packed;
292
293
294#define STORAGE_CHANNEL_REMOVABLE_FLAG 0x1
295#define STORAGE_CHANNEL_EMULATED_IDE_FLAG 0x2
296
297struct vstor_packet {
298
299 enum vstor_packet_operation operation;
300
301
302 u32 flags;
303
304
305 u32 status;
306
307
308 union {
309
310
311
312
313 struct vmscsi_request vm_srb;
314
315
316 struct vmstorage_channel_properties storage_channel_properties;
317
318
319 struct vmstorage_protocol_version version;
320
321
322 struct hv_fc_wwn_packet wwn_packet;
323
324
325 u16 sub_channel_count;
326
327
328 u8 buffer[0x34];
329 };
330} __packed;
331
332
333
334
335
336
337
338
339#define REQUEST_COMPLETION_FLAG 0x1
340
341
342enum storvsc_request_type {
343 WRITE_TYPE = 0,
344 READ_TYPE,
345 UNKNOWN_TYPE,
346};

/*
 * SRB status bits returned by the host.  The two high-order bits are
 * modifier flags (autosense valid, queue frozen); SRB_STATUS() below masks
 * them off to recover the base status code.
 */
352#define SRB_STATUS_AUTOSENSE_VALID 0x80
353#define SRB_STATUS_QUEUE_FROZEN 0x40
354#define SRB_STATUS_INVALID_LUN 0x20
355#define SRB_STATUS_SUCCESS 0x01
356#define SRB_STATUS_ABORTED 0x02
357#define SRB_STATUS_ERROR 0x04
358#define SRB_STATUS_DATA_OVERRUN 0x12
359
360#define SRB_STATUS(status) \
361 (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
362
363
364
365
366static int storvsc_ringbuffer_size = (128 * 1024);
367static u32 max_outstanding_req_per_channel;
368static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
369
370static int storvsc_vcpus_per_sub_channel = 4;
371static unsigned int storvsc_max_hw_queues;
372
373module_param(storvsc_ringbuffer_size, int, S_IRUGO);
374MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
375
376module_param(storvsc_max_hw_queues, uint, 0644);
377MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");
378
379module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
380MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
381
382static int ring_avail_percent_lowater = 10;
383module_param(ring_avail_percent_lowater, int, S_IRUGO);
384MODULE_PARM_DESC(ring_avail_percent_lowater,
385 "Select a channel if available ring size > this in percent");
386
387
388
389
390static int storvsc_timeout = 180;
391
392#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
393static struct scsi_transport_template *fc_transport_template;
394#endif
395
396static struct scsi_host_template scsi_driver;
397static void storvsc_on_channel_callback(void *context);
398
399#define STORVSC_MAX_LUNS_PER_TARGET 255
400#define STORVSC_MAX_TARGETS 2
401#define STORVSC_MAX_CHANNELS 8
402
403#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
404#define STORVSC_FC_MAX_TARGETS 128
405#define STORVSC_FC_MAX_CHANNELS 8
406
407#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
408#define STORVSC_IDE_MAX_TARGETS 1
409#define STORVSC_IDE_MAX_CHANNELS 1
410
411
412
413
414
415
416#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
417 sizeof(struct vstor_packet))
418
419struct storvsc_cmd_request {
420 struct scsi_cmnd *cmd;
421
422 struct hv_device *device;
423
424
425 struct completion wait_event;
426
427 struct vmbus_channel_packet_multipage_buffer mpb;
428 struct vmbus_packet_mpb_array *payload;
429 u32 payload_sz;
430
431 struct vstor_packet vstor_packet;
432};
433
434
435
436struct storvsc_device {
437 struct hv_device *device;
438
439 bool destroy;
440 bool drain_notify;
441 atomic_t num_outstanding_req;
442 struct Scsi_Host *host;
443
444 wait_queue_head_t waiting_to_drain;
445
446
447
448
449
450
451 unsigned int port_number;
452 unsigned char path_id;
453 unsigned char target_id;
454
455
456
457
458
459
460
461
462
463
464 int vmscsi_size_delta;
465
466
467
468
469 u32 max_transfer_bytes;
470
471
472
473 u16 num_sc;
474 struct vmbus_channel **stor_chns;
475
476
477
478 struct cpumask alloced_cpus;
479
480
481
482
483 spinlock_t lock;
484
485 struct storvsc_cmd_request init_request;
486 struct storvsc_cmd_request reset_request;
487
488
489
490 u64 node_name;
491 u64 port_name;
492#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
493 struct fc_rport *rport;
494#endif
495};
496
497struct hv_host_device {
498 struct hv_device *dev;
499 unsigned int port;
500 unsigned char path;
501 unsigned char target;
502 struct workqueue_struct *handle_error_wq;
503 struct work_struct host_scan_work;
504 struct Scsi_Host *host;
505};
506
507struct storvsc_scan_work {
508 struct work_struct work;
509 struct Scsi_Host *host;
510 u8 lun;
511 u8 tgt_id;
512};
513
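/*
 * Work item: re-scan a single LUN (for example after the host reports that
 * its capacity changed) and free the storvsc_scan_work that carried it here.
 */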
514static void storvsc_device_scan(struct work_struct *work)
515{
516 struct storvsc_scan_work *wrk;
517 struct scsi_device *sdev;
518
519 wrk = container_of(work, struct storvsc_scan_work, work);
520
521 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
522 if (!sdev)
523 goto done;
524 scsi_rescan_device(&sdev->sdev_gendev);
525 scsi_device_put(sdev);
526
527done:
528 kfree(wrk);
529}
530
531static void storvsc_host_scan(struct work_struct *work)
532{
533 struct Scsi_Host *host;
534 struct scsi_device *sdev;
535 struct hv_host_device *host_device =
536 container_of(work, struct hv_host_device, host_scan_work);
537
538 host = host_device->host;
539
	/*
	 * Before scanning the host, first check whether any of the currently
	 * known devices have been hot removed: issue a TEST UNIT READY
	 * against every known device.  The command fails for devices that
	 * are gone, and the resulting error handling removes them.  Only
	 * then scan for LUNs that were added.
	 */
550 mutex_lock(&host->scan_mutex);
551 shost_for_each_device(sdev, host)
552 scsi_test_unit_ready(sdev, 1, 1, NULL);
553 mutex_unlock(&host->scan_mutex);
554
555
556
557 scsi_scan_host(host);
558}
559
560static void storvsc_remove_lun(struct work_struct *work)
561{
562 struct storvsc_scan_work *wrk;
563 struct scsi_device *sdev;
564
565 wrk = container_of(work, struct storvsc_scan_work, work);
566 if (!scsi_host_get(wrk->host))
567 goto done;
568
569 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
570
571 if (sdev) {
572 scsi_remove_device(sdev);
573 scsi_device_put(sdev);
574 }
575 scsi_host_put(wrk->host);
576
577done:
578 kfree(wrk);
579}

/*
 * The host can also send us unsolicited messages (for example LUN
 * add/remove notifications).  To avoid races while the driver is being
 * unloaded, accesses to the per-device state are gated as follows:
 *
 * 1) Once the device is marked as being destroyed (stor_device->destroy),
 *    no new outgoing requests are issued: get_out_stor_device() fails.
 * 2) Incoming messages are still accepted while outstanding requests are
 *    being drained, so that every request that was sent out is accounted
 *    for: get_in_stor_device() only fails once the device is being
 *    destroyed and nothing is outstanding.
 */
597static inline struct storvsc_device *get_out_stor_device(
598 struct hv_device *device)
599{
600 struct storvsc_device *stor_device;
601
602 stor_device = hv_get_drvdata(device);
603
604 if (stor_device && stor_device->destroy)
605 stor_device = NULL;
606
607 return stor_device;
608}
609
610
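/*
 * Block until the host has completed every request that is still
 * outstanding.  Used during device removal, host reset and suspend.
 */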
611static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
612{
613 dev->drain_notify = true;
614 wait_event(dev->waiting_to_drain,
615 atomic_read(&dev->num_outstanding_req) == 0);
616 dev->drain_notify = false;
617}
618
619static inline struct storvsc_device *get_in_stor_device(
620 struct hv_device *device)
621{
622 struct storvsc_device *stor_device;
623
624 stor_device = hv_get_drvdata(device);
625
626 if (!stor_device)
627 goto get_in_err;
628
629
630
631
632
633
634 if (stor_device->destroy &&
635 (atomic_read(&stor_device->num_outstanding_req) == 0))
636 stor_device = NULL;
637
638get_in_err:
639 return stor_device;
640
641}
642
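/*
 * VMBus callback invoked when a channel's target CPU is changed (e.g. via
 * sysfs).  Re-points the per-CPU channel cache: the old CPU keeps a channel
 * only if some other channel still targets it, and the new CPU is recorded
 * as owning 'channel'.  stor_device->lock keeps stor_chns[] and
 * alloced_cpus consistent for readers in storvsc_do_io().
 */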
643static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old,
644 u32 new)
645{
646 struct storvsc_device *stor_device;
647 struct vmbus_channel *cur_chn;
648 bool old_is_alloced = false;
649 struct hv_device *device;
650 unsigned long flags;
651 int cpu;
652
653 device = channel->primary_channel ?
654 channel->primary_channel->device_obj
655 : channel->device_obj;
656 stor_device = get_out_stor_device(device);
657 if (!stor_device)
658 return;
659
660
661 spin_lock_irqsave(&stor_device->lock, flags);
662
663
664
665
666
667
668 if (device->channel != channel && device->channel->target_cpu == old) {
669 cur_chn = device->channel;
670 old_is_alloced = true;
671 goto old_is_alloced;
672 }
673 list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
674 if (cur_chn == channel)
675 continue;
676 if (cur_chn->target_cpu == old) {
677 old_is_alloced = true;
678 goto old_is_alloced;
679 }
680 }
681
682old_is_alloced:
683 if (old_is_alloced)
684 WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
685 else
686 cpumask_clear_cpu(old, &stor_device->alloced_cpus);
687
688
689 for_each_possible_cpu(cpu) {
690 if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
691 cpu, &stor_device->alloced_cpus))
692 WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
693 }
694
695 WRITE_ONCE(stor_device->stor_chns[new], channel);
696 cpumask_set_cpu(new, &stor_device->alloced_cpus);
697
698 spin_unlock_irqrestore(&stor_device->lock, flags);
699}
700
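/*
 * Compute the transaction ID carried in the VMBus packet for a request.
 * The init and reset markers pass through unchanged; normal SCSI commands
 * use their block-layer unique tag plus one, so that a real command never
 * has an ID of 0 (an ID of 0 is how the host tags unsolicited messages).
 */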
701static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
702{
703 struct storvsc_cmd_request *request =
704 (struct storvsc_cmd_request *)(unsigned long)rqst_addr;
705
706 if (rqst_addr == VMBUS_RQST_INIT)
707 return VMBUS_RQST_INIT;
708 if (rqst_addr == VMBUS_RQST_RESET)
709 return VMBUS_RQST_RESET;
710
711
712
713
714
715 return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
716}
717
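/*
 * Callback run when the host offers a new sub-channel: open it with the
 * same ring-buffer sizes as the primary channel and record it in the
 * per-CPU channel table so storvsc_do_io() can use it.
 */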
718static void handle_sc_creation(struct vmbus_channel *new_sc)
719{
720 struct hv_device *device = new_sc->primary_channel->device_obj;
721 struct device *dev = &device->device;
722 struct storvsc_device *stor_device;
723 struct vmstorage_channel_properties props;
724 int ret;
725
726 stor_device = get_out_stor_device(device);
727 if (!stor_device)
728 return;
729
730 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
731 new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE;
732
733 new_sc->next_request_id_callback = storvsc_next_request_id;
734
735 ret = vmbus_open(new_sc,
736 storvsc_ringbuffer_size,
737 storvsc_ringbuffer_size,
738 (void *)&props,
739 sizeof(struct vmstorage_channel_properties),
740 storvsc_on_channel_callback, new_sc);
741
742
743 if (ret != 0) {
744 dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
745 return;
746 }
747
748 new_sc->change_target_cpu_callback = storvsc_change_target_cpu;
749
750
751 stor_device->stor_chns[new_sc->target_cpu] = new_sc;
752 cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
753}
754
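/*
 * Ask the host to create sub-channels.  The number requested is bounded by
 * both what the host offered (max_chns) and the number of online CPUs
 * minus one (the primary channel already has a CPU).  The actual channel
 * offers arrive later and are handled by handle_sc_creation().
 */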
755static void handle_multichannel_storage(struct hv_device *device, int max_chns)
756{
757 struct device *dev = &device->device;
758 struct storvsc_device *stor_device;
759 int num_sc;
760 struct storvsc_cmd_request *request;
761 struct vstor_packet *vstor_packet;
762 int ret, t;
763
764
765
766
767
768
769
770
771 num_sc = min((int)(num_online_cpus() - 1), max_chns);
772 if (!num_sc)
773 return;
774
775 stor_device = get_out_stor_device(device);
776 if (!stor_device)
777 return;
778
779 stor_device->num_sc = num_sc;
780 request = &stor_device->init_request;
781 vstor_packet = &request->vstor_packet;
782
783
784
785
786 vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
787
788
789
790
791 memset(request, 0, sizeof(struct storvsc_cmd_request));
792 init_completion(&request->wait_event);
793 vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
794 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
795 vstor_packet->sub_channel_count = num_sc;
796
797 ret = vmbus_sendpacket(device->channel, vstor_packet,
798 (sizeof(struct vstor_packet) -
799 stor_device->vmscsi_size_delta),
800 VMBUS_RQST_INIT,
801 VM_PKT_DATA_INBAND,
802 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
803
804 if (ret != 0) {
805 dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
806 return;
807 }
808
809 t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
810 if (t == 0) {
811 dev_err(dev, "Failed to create sub-channel: timed out\n");
812 return;
813 }
814
815 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
816 vstor_packet->status != 0) {
817 dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
818 vstor_packet->operation, vstor_packet->status);
819 return;
820 }
821
822
823
824
825
826
827}
828
829static void cache_wwn(struct storvsc_device *stor_device,
830 struct vstor_packet *vstor_packet)
831{
832
833
834
835 if (vstor_packet->wwn_packet.primary_active) {
836 stor_device->node_name =
837 wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn);
838 stor_device->port_name =
839 wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn);
840 } else {
841 stor_device->node_name =
842 wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn);
843 stor_device->port_name =
844 wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn);
845 }
846}
847
848
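/*
 * Send a single vstor_packet on the primary channel and wait (up to five
 * seconds) for the host's reply.  When status_check is set, also require
 * the reply to be a successful VSTOR_OPERATION_COMPLETE_IO.
 */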
849static int storvsc_execute_vstor_op(struct hv_device *device,
850 struct storvsc_cmd_request *request,
851 bool status_check)
852{
853 struct storvsc_device *stor_device;
854 struct vstor_packet *vstor_packet;
855 int ret, t;
856
857 stor_device = get_out_stor_device(device);
858 if (!stor_device)
859 return -ENODEV;
860
861 vstor_packet = &request->vstor_packet;
862
863 init_completion(&request->wait_event);
864 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
865
866 ret = vmbus_sendpacket(device->channel, vstor_packet,
867 (sizeof(struct vstor_packet) -
868 stor_device->vmscsi_size_delta),
869 VMBUS_RQST_INIT,
870 VM_PKT_DATA_INBAND,
871 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
872 if (ret != 0)
873 return ret;
874
875 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
876 if (t == 0)
877 return -ETIMEDOUT;
878
879 if (!status_check)
880 return ret;
881
882 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
883 vstor_packet->status != 0)
884 return -EINVAL;
885
886 return ret;
887}
888
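/*
 * Bring up the storage protocol on the primary channel: BEGIN_INITIALIZATION,
 * protocol version negotiation (newest version first), QUERY_PROPERTIES,
 * an optional FC WWN query, END_INITIALIZATION, and finally sub-channel
 * creation when the host supports multi-channel operation.
 */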
889static int storvsc_channel_init(struct hv_device *device, bool is_fc)
890{
891 struct storvsc_device *stor_device;
892 struct storvsc_cmd_request *request;
893 struct vstor_packet *vstor_packet;
894 int ret, i;
895 int max_chns;
896 bool process_sub_channels = false;
897
898 stor_device = get_out_stor_device(device);
899 if (!stor_device)
900 return -ENODEV;
901
902 request = &stor_device->init_request;
903 vstor_packet = &request->vstor_packet;
904
905
906
907
908
909 memset(request, 0, sizeof(struct storvsc_cmd_request));
910 vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
911 ret = storvsc_execute_vstor_op(device, request, true);
912 if (ret)
913 return ret;
914
915
916
917
918 for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
919
920 memset(vstor_packet, 0, sizeof(struct vstor_packet));
921 vstor_packet->operation =
922 VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
923
924 vstor_packet->version.major_minor =
925 vmstor_protocols[i].protocol_version;
926
927
928
929
930 vstor_packet->version.revision = 0;
931 ret = storvsc_execute_vstor_op(device, request, false);
932 if (ret != 0)
933 return ret;
934
935 if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO)
936 return -EINVAL;
937
938 if (vstor_packet->status == 0) {
939 vmstor_proto_version =
940 vmstor_protocols[i].protocol_version;
941
942 sense_buffer_size =
943 vmstor_protocols[i].sense_buffer_size;
944
945 stor_device->vmscsi_size_delta =
946 vmstor_protocols[i].vmscsi_size_delta;
947
948 break;
949 }
950 }
951
952 if (vstor_packet->status != 0)
953 return -EINVAL;
954
955
956 memset(vstor_packet, 0, sizeof(struct vstor_packet));
957 vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
958 ret = storvsc_execute_vstor_op(device, request, true);
959 if (ret != 0)
960 return ret;
961
962
963
964
965
966
967 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
968
969
970
971
972
973
974
975
976
977
978 stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
979 GFP_KERNEL);
980 if (stor_device->stor_chns == NULL)
981 return -ENOMEM;
982
983 device->channel->change_target_cpu_callback = storvsc_change_target_cpu;
984
985 stor_device->stor_chns[device->channel->target_cpu] = device->channel;
986 cpumask_set_cpu(device->channel->target_cpu,
987 &stor_device->alloced_cpus);
988
989 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
990 if (vstor_packet->storage_channel_properties.flags &
991 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
992 process_sub_channels = true;
993 }
994 stor_device->max_transfer_bytes =
995 vstor_packet->storage_channel_properties.max_transfer_bytes;
996
997 if (!is_fc)
998 goto done;
999
1000
1001
1002
1003 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1004 vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA;
1005 ret = storvsc_execute_vstor_op(device, request, true);
1006 if (ret != 0)
1007 return ret;
1008
1009
1010
1011
1012 cache_wwn(stor_device, vstor_packet);
1013
1014done:
1015
1016 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1017 vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
1018 ret = storvsc_execute_vstor_op(device, request, true);
1019 if (ret != 0)
1020 return ret;
1021
1022 if (process_sub_channels)
1023 handle_multichannel_storage(device, max_chns);
1024
1025 return ret;
1026}
1027
1028static void storvsc_handle_error(struct vmscsi_request *vm_srb,
1029 struct scsi_cmnd *scmnd,
1030 struct Scsi_Host *host,
1031 u8 asc, u8 ascq)
1032{
1033 struct storvsc_scan_work *wrk;
1034 void (*process_err_fn)(struct work_struct *work);
1035 struct hv_host_device *host_dev = shost_priv(host);
	/*
	 * Hyper-V can set multiple bits in srb_status (for example ABORTED
	 * together with ERROR), so test the individual bits below, most
	 * specific condition first.
	 */
1043 if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
1044 set_host_byte(scmnd, DID_NO_CONNECT);
1045 process_err_fn = storvsc_remove_lun;
1046 goto do_work;
1047 }
1048
1049 if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
		    /* Capacity data has changed */
		    (asc == 0x2a) && (ascq == 0x9)) {
1053 process_err_fn = storvsc_device_scan;
1054
1055
1056
1057 set_host_byte(scmnd, DID_REQUEUE);
1058 goto do_work;
1059 }
1060 }
1061
1062 if (vm_srb->srb_status & SRB_STATUS_ERROR) {
1063
1064
1065
1066
1067 if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
1068 return;
1069
1070
1071
1072
1073
1074
1075
1076 switch (scmnd->cmnd[0]) {
1077 case ATA_16:
1078 case ATA_12:
1079 set_host_byte(scmnd, DID_PASSTHROUGH);
1080 break;
1081
1082
1083
1084
1085
1086 case TEST_UNIT_READY:
1087 break;
1088 default:
1089 set_host_byte(scmnd, DID_ERROR);
1090 }
1091 }
1092 return;
1093
1094do_work:
1095
1096
1097
1098 wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
1099 if (!wrk) {
1100 set_host_byte(scmnd, DID_TARGET_FAILURE);
1101 return;
1102 }
1103
1104 wrk->host = host;
1105 wrk->lun = vm_srb->lun;
1106 wrk->tgt_id = vm_srb->target_id;
1107 INIT_WORK(&wrk->work, process_err_fn);
1108 queue_work(host_dev->handle_error_wq, &wrk->work);
1109}
1110
1111
1112static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
1113 struct storvsc_device *stor_dev)
1114{
1115 struct scsi_cmnd *scmnd = cmd_request->cmd;
1116 struct scsi_sense_hdr sense_hdr;
1117 struct vmscsi_request *vm_srb;
1118 u32 data_transfer_length;
1119 struct Scsi_Host *host;
1120 u32 payload_sz = cmd_request->payload_sz;
1121 void *payload = cmd_request->payload;
1122 bool sense_ok;
1123
1124 host = stor_dev->host;
1125
1126 vm_srb = &cmd_request->vstor_packet.vm_srb;
1127 data_transfer_length = vm_srb->data_transfer_length;
1128
1129 scmnd->result = vm_srb->scsi_status;
1130
1131 if (scmnd->result) {
1132 sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
1133 SCSI_SENSE_BUFFERSIZE, &sense_hdr);
1134
1135 if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
1136 scsi_print_sense_hdr(scmnd->device, "storvsc",
1137 &sense_hdr);
1138 }
1139
1140 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
1141 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
1142 sense_hdr.ascq);
1143
1144
1145
1146
1147
1148 if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
1149 data_transfer_length = 0;
1150 }
1151
1152
1153 if (data_transfer_length > cmd_request->payload->range.len)
1154 data_transfer_length = cmd_request->payload->range.len;
1155
1156 scsi_set_resid(scmnd,
1157 cmd_request->payload->range.len - data_transfer_length);
1158
1159 scsi_done(scmnd);
1160
1161 if (payload_sz >
1162 sizeof(struct vmbus_channel_packet_multipage_buffer))
1163 kfree(payload);
1164}
1165
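/*
 * Handle a COMPLETE_IO packet from the host: copy SCSI/SRB status, the
 * sense data (when valid) and the transferred length back into the
 * original request, complete the SCSI command, and wake up anyone waiting
 * in storvsc_wait_to_drain() once the last outstanding request finishes.
 */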
1166static void storvsc_on_io_completion(struct storvsc_device *stor_device,
1167 struct vstor_packet *vstor_packet,
1168 struct storvsc_cmd_request *request)
1169{
1170 struct vstor_packet *stor_pkt;
1171 struct hv_device *device = stor_device->device;
1172
1173 stor_pkt = &request->vstor_packet;
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186 if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
1187 (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
1188 vstor_packet->vm_srb.scsi_status = 0;
1189 vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
1190 }
1191
1192
1193 stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
1194 stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
1195
1196
1197
1198
1199
1200 stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size,
1201 vstor_packet->vm_srb.sense_info_length);
1202
1203 if (vstor_packet->vm_srb.scsi_status != 0 ||
1204 vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
1205
1206
1207
1208
1209
1210
1211 int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
1212 STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
1213
1214 storvsc_log(device, loglevel,
1215 "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
1216 scsi_cmd_to_rq(request->cmd)->tag,
1217 stor_pkt->vm_srb.cdb[0],
1218 vstor_packet->vm_srb.scsi_status,
1219 vstor_packet->vm_srb.srb_status,
1220 vstor_packet->status);
1221 }
1222
1223 if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
1224 (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
1225 memcpy(request->cmd->sense_buffer,
1226 vstor_packet->vm_srb.sense_data,
1227 stor_pkt->vm_srb.sense_info_length);
1228
1229 stor_pkt->vm_srb.data_transfer_length =
1230 vstor_packet->vm_srb.data_transfer_length;
1231
1232 storvsc_command_completion(request, stor_device);
1233
1234 if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
1235 stor_device->drain_notify)
1236 wake_up(&stor_device->waiting_to_drain);
1237}
1238
1239static void storvsc_on_receive(struct storvsc_device *stor_device,
1240 struct vstor_packet *vstor_packet,
1241 struct storvsc_cmd_request *request)
1242{
1243 struct hv_host_device *host_dev;
1244 switch (vstor_packet->operation) {
1245 case VSTOR_OPERATION_COMPLETE_IO:
1246 storvsc_on_io_completion(stor_device, vstor_packet, request);
1247 break;
1248
1249 case VSTOR_OPERATION_REMOVE_DEVICE:
1250 case VSTOR_OPERATION_ENUMERATE_BUS:
1251 host_dev = shost_priv(stor_device->host);
1252 queue_work(
1253 host_dev->handle_error_wq, &host_dev->host_scan_work);
1254 break;
1255
1256 case VSTOR_OPERATION_FCHBA_DATA:
1257 cache_wwn(stor_device, vstor_packet);
1258#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1259 fc_host_node_name(stor_device->host) = stor_device->node_name;
1260 fc_host_port_name(stor_device->host) = stor_device->port_name;
1261#endif
1262 break;
1263 default:
1264 break;
1265 }
1266}
1267
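/*
 * Per-channel VMBus callback: drain the ring buffer and dispatch each
 * packet.  The transaction ID distinguishes the init and reset requests,
 * unsolicited host messages (ID 0), and normal commands, which are looked
 * up by block-layer tag (the ID is tag + 1, see storvsc_next_request_id()).
 */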
1268static void storvsc_on_channel_callback(void *context)
1269{
1270 struct vmbus_channel *channel = (struct vmbus_channel *)context;
1271 const struct vmpacket_descriptor *desc;
1272 struct hv_device *device;
1273 struct storvsc_device *stor_device;
1274 struct Scsi_Host *shost;
1275
1276 if (channel->primary_channel != NULL)
1277 device = channel->primary_channel->device_obj;
1278 else
1279 device = channel->device_obj;
1280
1281 stor_device = get_in_stor_device(device);
1282 if (!stor_device)
1283 return;
1284
1285 shost = stor_device->host;
1286
1287 foreach_vmbus_pkt(desc, channel) {
1288 struct vstor_packet *packet = hv_pkt_data(desc);
1289 struct storvsc_cmd_request *request = NULL;
1290 u32 pktlen = hv_pkt_datalen(desc);
1291 u64 rqst_id = desc->trans_id;
1292 u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
1293 stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
1294
1295 if (pktlen < minlen) {
1296 dev_err(&device->device,
1297 "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
1298 rqst_id, pktlen, minlen);
1299 continue;
1300 }
1301
1302 if (rqst_id == VMBUS_RQST_INIT) {
1303 request = &stor_device->init_request;
1304 } else if (rqst_id == VMBUS_RQST_RESET) {
1305 request = &stor_device->reset_request;
1306 } else {
1307
1308 if (rqst_id == 0) {
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326 if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
1327 packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
1328 dev_err(&device->device, "Invalid packet with ID of 0\n");
1329 continue;
1330 }
1331 } else {
1332 struct scsi_cmnd *scmnd;
1333
1334
1335 scmnd = scsi_host_find_tag(shost, rqst_id - 1);
1336 if (scmnd == NULL) {
1337 dev_err(&device->device, "Incorrect transaction ID\n");
1338 continue;
1339 }
1340 request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
1341 scsi_dma_unmap(scmnd);
1342 }
1343
1344 storvsc_on_receive(stor_device, packet, request);
1345 continue;
1346 }
1347
1348 memcpy(&request->vstor_packet, packet,
1349 (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
1350 complete(&request->wait_event);
1351 }
1352}
1353
1354static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
1355 bool is_fc)
1356{
1357 struct vmstorage_channel_properties props;
1358 int ret;
1359
1360 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
1361
1362 device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE;
1363 device->channel->next_request_id_callback = storvsc_next_request_id;
1364
1365 ret = vmbus_open(device->channel,
1366 ring_size,
1367 ring_size,
1368 (void *)&props,
1369 sizeof(struct vmstorage_channel_properties),
1370 storvsc_on_channel_callback, device->channel);
1371
1372 if (ret != 0)
1373 return ret;
1374
1375 ret = storvsc_channel_init(device, is_fc);
1376
1377 return ret;
1378}
1379
1380static int storvsc_dev_remove(struct hv_device *device)
1381{
1382 struct storvsc_device *stor_device;
1383
1384 stor_device = hv_get_drvdata(device);
1385
1386 stor_device->destroy = true;
1387
1388
1389 wmb();
1390
1391
1392
1393
1394
1395
1396
1397 storvsc_wait_to_drain(stor_device);
1398
1399
1400
1401
1402
1403
1404
1405
1406 hv_set_drvdata(device, NULL);
1407
1408
1409 vmbus_close(device->channel);
1410
1411 kfree(stor_device->stor_chns);
1412 kfree(stor_device);
1413 return 0;
1414}
1415
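/*
 * Slow path of channel selection: pick an outgoing channel for hardware
 * queue q_num when none is cached yet.  Prefer a channel whose target CPU
 * is on the same NUMA node as q_num, fall back to the primary channel, and
 * cache the result in stor_chns[q_num].  Called with stor_device->lock held.
 */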
1416static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1417 u16 q_num)
1418{
1419 u16 slot = 0;
1420 u16 hash_qnum;
1421 const struct cpumask *node_mask;
1422 int num_channels, tgt_cpu;
1423
1424 if (stor_device->num_sc == 0) {
1425 stor_device->stor_chns[q_num] = stor_device->device->channel;
1426 return stor_device->device->channel;
1427 }
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438 node_mask = cpumask_of_node(cpu_to_node(q_num));
1439
1440 num_channels = 0;
1441 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1442 if (cpumask_test_cpu(tgt_cpu, node_mask))
1443 num_channels++;
1444 }
1445 if (num_channels == 0) {
1446 stor_device->stor_chns[q_num] = stor_device->device->channel;
1447 return stor_device->device->channel;
1448 }
1449
1450 hash_qnum = q_num;
1451 while (hash_qnum >= num_channels)
1452 hash_qnum -= num_channels;
1453
1454 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1455 if (!cpumask_test_cpu(tgt_cpu, node_mask))
1456 continue;
1457 if (slot == hash_qnum)
1458 break;
1459 slot++;
1460 }
1461
1462 stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];
1463
1464 return stor_device->stor_chns[q_num];
1465}
1466
1467
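/*
 * Send an already-built request to the host.  If the channel cached for
 * this hardware queue is serviced by the submitting CPU, try to spread the
 * load: prefer another channel on the same NUMA node whose outbound ring
 * is more than ring_avail_percent_lowater percent free, then the cached
 * channel itself, then a channel on another NUMA node, and finally fall
 * back to the cached channel regardless.  If no channel is cached yet,
 * get_og_chn() picks and caches one under the lock.
 */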
1468static int storvsc_do_io(struct hv_device *device,
1469 struct storvsc_cmd_request *request, u16 q_num)
1470{
1471 struct storvsc_device *stor_device;
1472 struct vstor_packet *vstor_packet;
1473 struct vmbus_channel *outgoing_channel, *channel;
1474 unsigned long flags;
1475 int ret = 0;
1476 const struct cpumask *node_mask;
1477 int tgt_cpu;
1478
1479 vstor_packet = &request->vstor_packet;
1480 stor_device = get_out_stor_device(device);
1481
1482 if (!stor_device)
1483 return -ENODEV;
1484
1485
1486 request->device = device;
1487
1488
1489
1490
1491 outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
1492 if (outgoing_channel != NULL) {
1493 if (outgoing_channel->target_cpu == q_num) {
1494
1495
1496
1497
1498 node_mask = cpumask_of_node(cpu_to_node(q_num));
1499 for_each_cpu_wrap(tgt_cpu,
1500 &stor_device->alloced_cpus, q_num + 1) {
1501 if (!cpumask_test_cpu(tgt_cpu, node_mask))
1502 continue;
1503 if (tgt_cpu == q_num)
1504 continue;
1505 channel = READ_ONCE(
1506 stor_device->stor_chns[tgt_cpu]);
1507 if (channel == NULL)
1508 continue;
1509 if (hv_get_avail_to_write_percent(
1510 &channel->outbound)
1511 > ring_avail_percent_lowater) {
1512 outgoing_channel = channel;
1513 goto found_channel;
1514 }
1515 }
1516
1517
1518
1519
1520
1521 if (hv_get_avail_to_write_percent(
1522 &outgoing_channel->outbound)
1523 > ring_avail_percent_lowater)
1524 goto found_channel;
1525
1526
1527
1528
1529
1530
1531 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1532 if (cpumask_test_cpu(tgt_cpu, node_mask))
1533 continue;
1534 channel = READ_ONCE(
1535 stor_device->stor_chns[tgt_cpu]);
1536 if (channel == NULL)
1537 continue;
1538 if (hv_get_avail_to_write_percent(
1539 &channel->outbound)
1540 > ring_avail_percent_lowater) {
1541 outgoing_channel = channel;
1542 goto found_channel;
1543 }
1544 }
1545 }
1546 } else {
1547 spin_lock_irqsave(&stor_device->lock, flags);
1548 outgoing_channel = stor_device->stor_chns[q_num];
1549 if (outgoing_channel != NULL) {
1550 spin_unlock_irqrestore(&stor_device->lock, flags);
1551 goto found_channel;
1552 }
1553 outgoing_channel = get_og_chn(stor_device, q_num);
1554 spin_unlock_irqrestore(&stor_device->lock, flags);
1555 }
1556
1557found_channel:
1558 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
1559
1560 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
1561 stor_device->vmscsi_size_delta);
1562
1563
1564 vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
1565
1566
1567 vstor_packet->vm_srb.data_transfer_length =
1568 request->payload->range.len;
1569
1570 vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
1571
1572 if (request->payload->range.len) {
1573
1574 ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
1575 request->payload, request->payload_sz,
1576 vstor_packet,
1577 (sizeof(struct vstor_packet) -
1578 stor_device->vmscsi_size_delta),
1579 (unsigned long)request);
1580 } else {
1581 ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
1582 (sizeof(struct vstor_packet) -
1583 stor_device->vmscsi_size_delta),
1584 (unsigned long)request,
1585 VM_PKT_DATA_INBAND,
1586 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1587 }
1588
1589 if (ret != 0)
1590 return ret;
1591
1592 atomic_inc(&stor_device->num_outstanding_req);
1593
1594 return ret;
1595}
1596
1597static int storvsc_device_alloc(struct scsi_device *sdevice)
1598{
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608 sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
1609
1610 return 0;
1611}
1612
1613static int storvsc_device_configure(struct scsi_device *sdevice)
1614{
1615 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
1616
1617 sdevice->no_write_same = 1;
1618
1619
1620
1621
1622
1623
1624 if (!strncmp(sdevice->vendor, "Msft", 4)) {
1625 switch (vmstor_proto_version) {
1626 case VMSTOR_PROTO_VERSION_WIN8:
1627 case VMSTOR_PROTO_VERSION_WIN8_1:
1628 sdevice->scsi_level = SCSI_SPC_3;
1629 break;
1630 }
1631
1632 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
1633 sdevice->no_write_same = 0;
1634 }
1635
1636 return 0;
1637}
1638
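/*
 * bios_param callback: report a synthetic geometry of 255 heads and
 * 63 sectors per track, and derive the cylinder count from the capacity
 * (capped at 0xffff).
 */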
1639static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
1640 sector_t capacity, int *info)
1641{
1642 sector_t nsect = capacity;
1643 sector_t cylinders = nsect;
1644 int heads, sectors_pt;
1645
1646
1647
1648
1649 heads = 0xff;
1650 sectors_pt = 0x3f;
1651 sector_div(cylinders, heads * sectors_pt);
1652 if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
1653 cylinders = 0xffff;
1654
1655 info[0] = heads;
1656 info[1] = sectors_pt;
1657 info[2] = (int)cylinders;
1658
1659 return 0;
1660}
1661
1662static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1663{
1664 struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
1665 struct hv_device *device = host_dev->dev;
1666
1667 struct storvsc_device *stor_device;
1668 struct storvsc_cmd_request *request;
1669 struct vstor_packet *vstor_packet;
1670 int ret, t;
1671
1672 stor_device = get_out_stor_device(device);
1673 if (!stor_device)
1674 return FAILED;
1675
1676 request = &stor_device->reset_request;
1677 vstor_packet = &request->vstor_packet;
1678 memset(vstor_packet, 0, sizeof(struct vstor_packet));
1679
1680 init_completion(&request->wait_event);
1681
1682 vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
1683 vstor_packet->flags = REQUEST_COMPLETION_FLAG;
1684 vstor_packet->vm_srb.path_id = stor_device->path_id;
1685
1686 ret = vmbus_sendpacket(device->channel, vstor_packet,
1687 (sizeof(struct vstor_packet) -
1688 stor_device->vmscsi_size_delta),
1689 VMBUS_RQST_RESET,
1690 VM_PKT_DATA_INBAND,
1691 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1692 if (ret != 0)
1693 return FAILED;
1694
1695 t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
1696 if (t == 0)
1697 return TIMEOUT_ERROR;
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708 storvsc_wait_to_drain(stor_device);
1709
1710 return SUCCESS;
1711}
1712
1713
1714
1715
1716
1717
1718static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
1719{
1720#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1721 if (scmnd->device->host->transportt == fc_transport_template)
1722 return fc_eh_timed_out(scmnd);
1723#endif
1724 return BLK_EH_RESET_TIMER;
1725}
1726
1727static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
1728{
1729 bool allowed = true;
1730 u8 scsi_op = scmnd->cmnd[0];
1731
1732 switch (scsi_op) {
1733
1734 case WRITE_SAME:
1735
1736
1737
1738
1739 case SET_WINDOW:
1740 set_host_byte(scmnd, DID_ERROR);
1741 allowed = false;
1742 break;
1743 default:
1744 break;
1745 }
1746 return allowed;
1747}
1748
1749static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1750{
1751 int ret;
1752 struct hv_host_device *host_dev = shost_priv(host);
1753 struct hv_device *dev = host_dev->dev;
1754 struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
1755 struct scatterlist *sgl;
1756 struct vmscsi_request *vm_srb;
1757 struct vmbus_packet_mpb_array *payload;
1758 u32 payload_sz;
1759 u32 length;
1760
1761 if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
1762
1763
1764
1765
1766
1767
1768
1769
1770 if (!storvsc_scsi_cmd_ok(scmnd)) {
1771 scsi_done(scmnd);
1772 return 0;
1773 }
1774 }
1775
1776
1777 cmd_request->cmd = scmnd;
1778
1779 memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
1780 vm_srb = &cmd_request->vstor_packet.vm_srb;
1781 vm_srb->win8_extension.time_out_value = 60;
1782
1783 vm_srb->win8_extension.srb_flags |=
1784 SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
1785
1786 if (scmnd->device->tagged_supported) {
1787 vm_srb->win8_extension.srb_flags |=
1788 (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
1789 vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
1790 vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
1791 }
1792
1793
1794 switch (scmnd->sc_data_direction) {
1795 case DMA_TO_DEVICE:
1796 vm_srb->data_in = WRITE_TYPE;
1797 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
1798 break;
1799 case DMA_FROM_DEVICE:
1800 vm_srb->data_in = READ_TYPE;
1801 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
1802 break;
1803 case DMA_NONE:
1804 vm_srb->data_in = UNKNOWN_TYPE;
1805 vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
1806 break;
1807 default:
1808
1809
1810
1811
1812 WARN(1, "Unexpected data direction: %d\n",
1813 scmnd->sc_data_direction);
1814 return -EINVAL;
1815 }
1816
1817
1818 vm_srb->port_number = host_dev->port;
1819 vm_srb->path_id = scmnd->device->channel;
1820 vm_srb->target_id = scmnd->device->id;
1821 vm_srb->lun = scmnd->device->lun;
1822
1823 vm_srb->cdb_length = scmnd->cmd_len;
1824
1825 memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
1826
1827 sgl = (struct scatterlist *)scsi_sglist(scmnd);
1828
1829 length = scsi_bufflen(scmnd);
1830 payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
1831 payload_sz = sizeof(cmd_request->mpb);
1832
1833 if (scsi_sg_count(scmnd)) {
1834 unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
1835 unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
1836 struct scatterlist *sg;
1837 unsigned long hvpfn, hvpfns_to_add;
1838 int j, i = 0, sg_count;
1839
1840 if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
1841
1842 payload_sz = (hvpg_count * sizeof(u64) +
1843 sizeof(struct vmbus_packet_mpb_array));
1844 payload = kzalloc(payload_sz, GFP_ATOMIC);
1845 if (!payload)
1846 return SCSI_MLQUEUE_DEVICE_BUSY;
1847 }
1848
1849 payload->range.len = length;
1850 payload->range.offset = offset_in_hvpg;
1851
1852 sg_count = scsi_dma_map(scmnd);
1853 if (sg_count < 0) {
1854 ret = SCSI_MLQUEUE_DEVICE_BUSY;
1855 goto err_free_payload;
1856 }
1857
1858 for_each_sg(sgl, sg, sg_count, j) {
1859
1860
1861
1862
1863
1864
1865
1866
1867 hvpfn = HVPFN_DOWN(sg_dma_address(sg));
1868 hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
1869 sg_dma_len(sg)) - hvpfn;
1870
1871
1872
1873
1874
1875
1876
1877
1878 while (hvpfns_to_add--)
1879 payload->range.pfn_array[i++] = hvpfn++;
1880 }
1881 }
1882
1883 cmd_request->payload = payload;
1884 cmd_request->payload_sz = payload_sz;
1885
1886
1887 ret = storvsc_do_io(dev, cmd_request, get_cpu());
1888 put_cpu();
1889
1890 if (ret == -EAGAIN) {
1891
1892 ret = SCSI_MLQUEUE_DEVICE_BUSY;
1893 goto err_free_payload;
1894 }
1895
1896 return 0;
1897
1898err_free_payload:
1899 if (payload_sz > sizeof(cmd_request->mpb))
1900 kfree(payload);
1901
1902 return ret;
1903}
1904
1905static struct scsi_host_template scsi_driver = {
1906 .module = THIS_MODULE,
1907 .name = "storvsc_host_t",
1908 .cmd_size = sizeof(struct storvsc_cmd_request),
1909 .bios_param = storvsc_get_chs,
1910 .queuecommand = storvsc_queuecommand,
1911 .eh_host_reset_handler = storvsc_host_reset_handler,
1912 .proc_name = "storvsc_host",
1913 .eh_timed_out = storvsc_eh_timed_out,
1914 .slave_alloc = storvsc_device_alloc,
1915 .slave_configure = storvsc_device_configure,
1916 .cmd_per_lun = 2048,
1917 .this_id = -1,
1918
1919 .virt_boundary_mask = PAGE_SIZE-1,
1920 .no_write_same = 1,
1921 .track_queue_depth = 1,
1922 .change_queue_depth = storvsc_change_queue_depth,
1923};
1924
1925enum {
1926 SCSI_GUID,
1927 IDE_GUID,
1928 SFC_GUID,
1929};
1930
1931static const struct hv_vmbus_device_id id_table[] = {
1932
1933 { HV_SCSI_GUID,
1934 .driver_data = SCSI_GUID
1935 },
1936
1937 { HV_IDE_GUID,
1938 .driver_data = IDE_GUID
1939 },
1940
1941 {
1942 HV_SYNTHFC_GUID,
1943 .driver_data = SFC_GUID
1944 },
1945 { },
1946};
1947
1948MODULE_DEVICE_TABLE(vmbus, id_table);
1949
1950static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };
1951
1952static bool hv_dev_is_fc(struct hv_device *hv_dev)
1953{
1954 return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
1955}
1956
1957static int storvsc_probe(struct hv_device *device,
1958 const struct hv_vmbus_device_id *dev_id)
1959{
1960 int ret;
1961 int num_cpus = num_online_cpus();
1962 int num_present_cpus = num_present_cpus();
1963 struct Scsi_Host *host;
1964 struct hv_host_device *host_dev;
1965 bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
1966 bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
1967 int target = 0;
1968 struct storvsc_device *stor_device;
1969 int max_luns_per_target;
1970 int max_targets;
1971 int max_channels;
1972 int max_sub_channels = 0;
1973
1974
1975
1976
1977
1978
1979 if (vmbus_proto_version < VERSION_WIN8) {
1980 max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
1981 max_targets = STORVSC_IDE_MAX_TARGETS;
1982 max_channels = STORVSC_IDE_MAX_CHANNELS;
1983 } else {
1984 max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
1985 max_targets = STORVSC_MAX_TARGETS;
1986 max_channels = STORVSC_MAX_CHANNELS;
1987
1988
1989
1990
1991
1992
1993 if (!dev_is_ide)
1994 max_sub_channels =
1995 (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
1996 }
1997
1998 scsi_driver.can_queue = max_outstanding_req_per_channel *
1999 (max_sub_channels + 1) *
2000 (100 - ring_avail_percent_lowater) / 100;
2001
2002 host = scsi_host_alloc(&scsi_driver,
2003 sizeof(struct hv_host_device));
2004 if (!host)
2005 return -ENOMEM;
2006
2007 host_dev = shost_priv(host);
2008 memset(host_dev, 0, sizeof(struct hv_host_device));
2009
2010 host_dev->port = host->host_no;
2011 host_dev->dev = device;
2012 host_dev->host = host;
2013
2014
2015 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
2016 if (!stor_device) {
2017 ret = -ENOMEM;
2018 goto err_out0;
2019 }
2020
2021 stor_device->destroy = false;
2022 init_waitqueue_head(&stor_device->waiting_to_drain);
2023 stor_device->device = device;
2024 stor_device->host = host;
2025 stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
2026 spin_lock_init(&stor_device->lock);
2027 hv_set_drvdata(device, stor_device);
2028 dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
2029
2030 stor_device->port_number = host->host_no;
2031 ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
2032 if (ret)
2033 goto err_out1;
2034
2035 host_dev->path = stor_device->path_id;
2036 host_dev->target = stor_device->target_id;
2037
2038 switch (dev_id->driver_data) {
2039 case SFC_GUID:
2040 host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
2041 host->max_id = STORVSC_FC_MAX_TARGETS;
2042 host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
2043#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2044 host->transportt = fc_transport_template;
2045#endif
2046 break;
2047
2048 case SCSI_GUID:
2049 host->max_lun = max_luns_per_target;
2050 host->max_id = max_targets;
2051 host->max_channel = max_channels - 1;
2052 break;
2053
2054 default:
2055 host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
2056 host->max_id = STORVSC_IDE_MAX_TARGETS;
2057 host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
2058 break;
2059 }
2060
2061 host->max_cmd_len = STORVSC_MAX_CMD_LEN;
2062
2063
2064
2065
2066
2067 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
2068
2069
2070
2071
2072 if (!dev_is_ide) {
2073 if (storvsc_max_hw_queues > num_present_cpus) {
2074 storvsc_max_hw_queues = 0;
2075 storvsc_log(device, STORVSC_LOGGING_WARN,
2076 "Resetting invalid storvsc_max_hw_queues value to default.\n");
2077 }
2078 if (storvsc_max_hw_queues)
2079 host->nr_hw_queues = storvsc_max_hw_queues;
2080 else
2081 host->nr_hw_queues = num_present_cpus;
2082 }
2083
2084
2085
2086
2087 host_dev->handle_error_wq =
2088 alloc_ordered_workqueue("storvsc_error_wq_%d",
2089 WQ_MEM_RECLAIM,
2090 host->host_no);
2091 if (!host_dev->handle_error_wq) {
2092 ret = -ENOMEM;
2093 goto err_out2;
2094 }
2095 INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
2096
2097 ret = scsi_add_host(host, &device->device);
2098 if (ret != 0)
2099 goto err_out3;
2100
2101 if (!dev_is_ide) {
2102 scsi_scan_host(host);
2103 } else {
2104 target = (device->dev_instance.b[5] << 8 |
2105 device->dev_instance.b[4]);
2106 ret = scsi_add_device(host, 0, target, 0);
2107 if (ret)
2108 goto err_out4;
2109 }
2110#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2111 if (host->transportt == fc_transport_template) {
2112 struct fc_rport_identifiers ids = {
2113 .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
2114 };
2115
2116 fc_host_node_name(host) = stor_device->node_name;
2117 fc_host_port_name(host) = stor_device->port_name;
2118 stor_device->rport = fc_remote_port_add(host, 0, &ids);
2119 if (!stor_device->rport) {
2120 ret = -ENOMEM;
2121 goto err_out4;
2122 }
2123 }
2124#endif
2125 return 0;
2126
2127err_out4:
2128 scsi_remove_host(host);
2129
2130err_out3:
2131 destroy_workqueue(host_dev->handle_error_wq);
2132
2133err_out2:
2134
2135
2136
2137
2138
2139
2140 storvsc_dev_remove(device);
2141 goto err_out0;
2142
2143err_out1:
2144 kfree(stor_device->stor_chns);
2145 kfree(stor_device);
2146
2147err_out0:
2148 scsi_host_put(host);
2149 return ret;
2150}
2151
2152
2153static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
2154{
2155 if (queue_depth > scsi_driver.can_queue)
2156 queue_depth = scsi_driver.can_queue;
2157
2158 return scsi_change_queue_depth(sdev, queue_depth);
2159}
2160
2161static int storvsc_remove(struct hv_device *dev)
2162{
2163 struct storvsc_device *stor_device = hv_get_drvdata(dev);
2164 struct Scsi_Host *host = stor_device->host;
2165 struct hv_host_device *host_dev = shost_priv(host);
2166
2167#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2168 if (host->transportt == fc_transport_template) {
2169 fc_remote_port_delete(stor_device->rport);
2170 fc_remove_host(host);
2171 }
2172#endif
2173 destroy_workqueue(host_dev->handle_error_wq);
2174 scsi_remove_host(host);
2175 storvsc_dev_remove(dev);
2176 scsi_host_put(host);
2177
2178 return 0;
2179}
2180
2181static int storvsc_suspend(struct hv_device *hv_dev)
2182{
2183 struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
2184 struct Scsi_Host *host = stor_device->host;
2185 struct hv_host_device *host_dev = shost_priv(host);
2186
2187 storvsc_wait_to_drain(stor_device);
2188
2189 drain_workqueue(host_dev->handle_error_wq);
2190
2191 vmbus_close(hv_dev->channel);
2192
2193 kfree(stor_device->stor_chns);
2194 stor_device->stor_chns = NULL;
2195
2196 cpumask_clear(&stor_device->alloced_cpus);
2197
2198 return 0;
2199}
2200
2201static int storvsc_resume(struct hv_device *hv_dev)
2202{
2203 int ret;
2204
2205 ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
2206 hv_dev_is_fc(hv_dev));
2207 return ret;
2208}
2209
2210static struct hv_driver storvsc_drv = {
2211 .name = KBUILD_MODNAME,
2212 .id_table = id_table,
2213 .probe = storvsc_probe,
2214 .remove = storvsc_remove,
2215 .suspend = storvsc_suspend,
2216 .resume = storvsc_resume,
2217 .driver = {
2218 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2219 },
2220};
2221
2222#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2223static struct fc_function_template fc_transport_functions = {
2224 .show_host_node_name = 1,
2225 .show_host_port_name = 1,
2226};
2227#endif
2228
2229static int __init storvsc_drv_init(void)
2230{
2231 int ret;
2232
	/*
	 * The ring buffer reserves space (one page) for its read/write
	 * indices, hence the "- PAGE_SIZE".  Divide the remaining data area
	 * by the worst-case request size (a multi-page buffer packet plus a
	 * vstor_packet plus the u64 transaction id) to bound the number of
	 * requests a single channel can have outstanding.
	 */
2243 max_outstanding_req_per_channel =
2244 ((storvsc_ringbuffer_size - PAGE_SIZE) /
2245 ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
2246 sizeof(struct vstor_packet) + sizeof(u64),
2247 sizeof(u64)));
2248
2249#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2250 fc_transport_template = fc_attach_transport(&fc_transport_functions);
2251 if (!fc_transport_template)
2252 return -ENODEV;
2253#endif
2254
2255 ret = vmbus_driver_register(&storvsc_drv);
2256
2257#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2258 if (ret)
2259 fc_release_transport(fc_transport_template);
2260#endif
2261
2262 return ret;
2263}
2264
2265static void __exit storvsc_drv_exit(void)
2266{
2267 vmbus_driver_unregister(&storvsc_drv);
2268#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
2269 fc_release_transport(fc_transport_template);
2270#endif
2271}
2272
2273MODULE_LICENSE("GPL");
2274MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
2275module_init(storvsc_drv_init);
2276module_exit(storvsc_drv_exit);
2277