1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/mempool.h>
21#include <linux/virtio.h>
22#include <linux/virtio_ids.h>
23#include <linux/virtio_config.h>
24#include <linux/virtio_scsi.h>
25#include <linux/cpu.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_cmnd.h>
29
/* Minimum number of struct virtio_scsi_cmd kept in the emergency mempool. */
#define VIRTIO_SCSI_MEMPOOL_SZ 64
/* Number of event buffers kept posted on the event virtqueue. */
#define VIRTIO_SCSI_EVENT_LEN 8
/* Index of the first request vq; vq 0 is control, vq 1 is event. */
#define VIRTIO_SCSI_VQ_BASE 2
33
34
/*
 * Per-command context: driver-side bookkeeping plus the request and
 * response headers handed to the device through a virtqueue.
 */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;		/* back-pointer to the midlayer command */
	struct completion *comp;	/* set for synchronous TMF requests */
	/* Device-readable request header (exactly one member is used). */
	union {
		struct virtio_scsi_cmd_req cmd;
		struct virtio_scsi_ctrl_tmf_req tmf;
		struct virtio_scsi_ctrl_an_req an;
	} req;
	/* Device-writable response, filled in by the host. */
	union {
		struct virtio_scsi_cmd_resp cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp an;
		struct virtio_scsi_event evt;
	} resp;
} ____cacheline_aligned_in_smp;
50
/* One posted event buffer plus the work item used to process it. */
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;	/* owning adapter */
	struct virtio_scsi_event event;	/* buffer written by the device */
	struct work_struct work;	/* defers handling to process context */
};
56
/* A virtqueue together with the lock that serializes access to it. */
struct virtio_scsi_vq {
	/* Protects vq: add/kick and the completion drain loop. */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/* Per-target state used to steer commands to a request virtqueue. */
struct virtio_scsi_target_state {
	/* Serializes the choice of req_vq in virtscsi_pick_vq(). */
	spinlock_t tgt_lock;

	/* Commands outstanding on this target. */
	atomic_t reqs;

	/* Currently active virtqueue for commands sent to this target. */
	struct virtio_scsi_vq *req_vq;
};
100
101
/* Driver instance data, stored in the Scsi_Host private area. */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Event buffers kept posted on the event virtqueue. */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;		/* number of request virtqueues */

	/* true when CPU affinity hints are applied to the request vqs */
	bool affinity_hint_set;

	/* CPU hotplug notifier, used to refresh the affinity hints. */
	struct notifier_block nb;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	/* Flexible array: sized at scsi_host_alloc() time by num_queues. */
	struct virtio_scsi_vq req_vqs[];
};
120
/* Slab cache and mempool backing struct virtio_scsi_cmd allocations. */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
123
/* The Scsi_Host is stashed in vdev->priv at probe time. */
static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}
128
129static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
130{
131 if (!resid)
132 return;
133
134 if (!scsi_bidi_cmnd(sc)) {
135 scsi_set_resid(sc, resid);
136 return;
137 }
138
139 scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
140 scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
141}
142
143
144
145
146
147
/*
 * virtscsi_complete_cmd - completion handler for a request-queue buffer.
 *
 * Translates the virtio response code into a SCSI host byte, copies any
 * sense data, returns the command descriptor to the mempool and signals
 * completion to the midlayer.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
	struct virtio_scsi_target_state *tgt =
		scsi_target(sc->device)->hostdata;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through - treat unknown responses as generic errors */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	mempool_free(cmd, virtscsi_cmd_pool);
	sc->scsi_done(sc);

	/* Drop the in-flight count taken when the command was queued. */
	atomic_dec(&tgt->reqs);
}
212
/*
 * virtscsi_vq_done - drain all completed buffers from a virtqueue.
 * @vscsi: adapter instance
 * @virtscsi_vq: wrapped virtqueue; its lock serializes the drain
 * @fn: handler invoked for each completed buffer
 *
 * Callbacks are disabled while buffers are popped; the enable/retry
 * loop closes the race with a buffer completing between the last pop
 * and virtqueue_enable_cb().  May run in interrupt context (hence the
 * irqsave locking).
 */
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
230
231static void virtscsi_req_done(struct virtqueue *vq)
232{
233 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
234 struct virtio_scsi *vscsi = shost_priv(sh);
235 int index = vq->index - VIRTIO_SCSI_VQ_BASE;
236 struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268 smp_read_barrier_depends();
269
270 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
271};
272
273static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
274{
275 struct virtio_scsi_cmd *cmd = buf;
276
277 if (cmd->comp)
278 complete_all(cmd->comp);
279 else
280 mempool_free(cmd, virtscsi_cmd_pool);
281}
282
283static void virtscsi_ctrl_done(struct virtqueue *vq)
284{
285 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
286 struct virtio_scsi *vscsi = shost_priv(sh);
287
288 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
289};
290
/*
 * virtscsi_kick_event - (re)post one event buffer on the event vq.
 *
 * Returns 0 on success or a negative errno from virtqueue_add_inbuf().
 * Called both at setup time and from the event handler to recycle a
 * consumed buffer, so it must be safe in atomic context (GFP_ATOMIC,
 * irqsave locking).
 */
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}
311
312static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
313{
314 int i;
315
316 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
317 vscsi->event_list[i].vscsi = vscsi;
318 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
319 }
320
321 return 0;
322}
323
324static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
325{
326 int i;
327
328 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
329 cancel_work_sync(&vscsi->event_list[i].work);
330}
331
332static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
333 struct virtio_scsi_event *event)
334{
335 struct scsi_device *sdev;
336 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
337 unsigned int target = event->lun[1];
338 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
339
340 switch (event->reason) {
341 case VIRTIO_SCSI_EVT_RESET_RESCAN:
342 scsi_add_device(shost, 0, target, lun);
343 break;
344 case VIRTIO_SCSI_EVT_RESET_REMOVED:
345 sdev = scsi_device_lookup(shost, 0, target, lun);
346 if (sdev) {
347 scsi_remove_device(sdev);
348 scsi_device_put(sdev);
349 } else {
350 pr_err("SCSI device %d 0 %d %d not found\n",
351 shost->host_no, target, lun);
352 }
353 break;
354 default:
355 pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
356 }
357}
358
/*
 * virtscsi_handle_param_change - react to a sense-code parameter event.
 *
 * The device packs the additional sense code in the low byte of
 * event->reason and the qualifier in the high byte.  A 0x2a/0x00,
 * 0x2a/0x01 or 0x2a/0x09 combination ("parameters/capacity changed")
 * triggers a rescan of the affected device.
 */
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = event->reason & 255;
	u8 ascq = event->reason >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed".  */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}
383
384static void virtscsi_handle_event(struct work_struct *work)
385{
386 struct virtio_scsi_event_node *event_node =
387 container_of(work, struct virtio_scsi_event_node, work);
388 struct virtio_scsi *vscsi = event_node->vscsi;
389 struct virtio_scsi_event *event = &event_node->event;
390
391 if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
392 event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
393 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
394 }
395
396 switch (event->event) {
397 case VIRTIO_SCSI_T_NO_EVENT:
398 break;
399 case VIRTIO_SCSI_T_TRANSPORT_RESET:
400 virtscsi_handle_transport_reset(vscsi, event);
401 break;
402 case VIRTIO_SCSI_T_PARAM_CHANGE:
403 virtscsi_handle_param_change(vscsi, event);
404 break;
405 default:
406 pr_err("Unsupport virtio scsi event %x\n", event->event);
407 }
408 virtscsi_kick_event(vscsi, event_node);
409}
410
411static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
412{
413 struct virtio_scsi_event_node *event_node = buf;
414
415 INIT_WORK(&event_node->work, virtscsi_handle_event);
416 schedule_work(&event_node->work);
417}
418
419static void virtscsi_event_done(struct virtqueue *vq)
420{
421 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
422 struct virtio_scsi *vscsi = shost_priv(sh);
423
424 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
425};
426
427
428
429
430
431
432
433
434
/*
 * virtscsi_add_cmd - build the scatterlists for a command and queue it.
 * @vq: the virtqueue to add to
 * @cmd: command descriptor (request/response headers live inside it)
 * @req_size: size of the device-readable request header
 * @resp_size: size of the device-writable response header
 * @gfp: allocation flags for indirect descriptors
 *
 * Buffer order is fixed by the virtio-scsi contract: request header,
 * data-out, response header, data-in.  Device-readable buffers must
 * all precede device-writable ones.  Returns virtqueue_add_sgs()'s
 * result.  Caller holds the vq lock.
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size, gfp_t gfp)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[4], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out)
		sgs[out_num++] = out->sgl;

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in)
		sgs[out_num + in_num++] = in->sgl;

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
}
471
/*
 * virtscsi_kick_cmd - queue a command and notify the device.
 *
 * The add and kick-prepare run under the vq lock; the actual notify
 * (a potentially slow VM exit) is done after dropping the lock to
 * keep hold times short.  Returns 0 or the error from
 * virtscsi_add_cmd().
 */
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
491
/*
 * virtscsi_queuecommand - common queuecommand path.
 * @vscsi: adapter
 * @req_vq: request virtqueue chosen by the caller
 * @sc: midlayer command to submit
 *
 * Builds the virtio request header (single-level LUN format: 0x01,
 * target, LUN with the 0x40 flat-addressing bit) and queues it.
 * Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY so the midlayer
 * retries when allocation or queueing fails.
 */
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
				 struct virtio_scsi_vq *req_vq,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi_cmd *cmd;
	int ret;

	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	ret = SCSI_MLQUEUE_HOST_BUSY;
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
	if (!cmd)
		goto out;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,	/* echoed back; identifies sc */
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
		.prio = 0,
		.crn = 0,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) == 0)
		ret = 0;
	else
		mempool_free(cmd, virtscsi_cmd_pool);

out:
	return ret;
}
539
/*
 * queuecommand entry point for the single-queue template: everything
 * goes through req_vqs[0].  The in-flight counter is still maintained
 * because virtscsi_complete_cmd() decrements it unconditionally.
 */
static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
		scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
550
/*
 * virtscsi_pick_vq - choose the request virtqueue for a target.
 *
 * While the target has commands in flight, keep using the queue that
 * was chosen for the first of them (completions then stay on one vq).
 * When the target goes idle, re-steer it to a queue derived from the
 * current CPU.  tgt_lock serializes the decision; ACCESS_ONCE keeps
 * the compiler from re-reading req_vq after the check.
 */
static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
{
	struct virtio_scsi_vq *vq;
	unsigned long flags;
	u32 queue_num;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	/*
	 * A value > 1 after the increment means other commands are
	 * already outstanding, so the stored req_vq is still valid.
	 */
	if (atomic_inc_return(&tgt->reqs) > 1)
		vq = ACCESS_ONCE(tgt->req_vq);
	else {
		queue_num = smp_processor_id();
		/* Fold the CPU number into the queue range. */
		while (unlikely(queue_num >= vscsi->num_queues))
			queue_num -= vscsi->num_queues;

		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
	}

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
	return vq;
}
577
/*
 * queuecommand entry point for the multiqueue template: pick a vq per
 * target (virtscsi_pick_vq also increments tgt->reqs) and submit.
 */
static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
				       struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
		scsi_target(sc->device)->hostdata;
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

	return virtscsi_queuecommand(vscsi, req_vq, sc);
}
588
/*
 * virtscsi_tmf - issue a task management function and wait for it.
 *
 * Sends the TMF on the control vq and blocks until the completion in
 * cmd->comp is signalled by virtscsi_complete_free().  Consumes @cmd
 * (frees it to the mempool) on all paths.  Returns SUCCESS or FAILED
 * per the SCSI error-handling convention.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
609
/*
 * SCSI EH callback: reset a logical unit via a LOGICAL UNIT RESET TMF.
 * Returns SUCCESS or FAILED; virtscsi_tmf() frees the descriptor.
 */
static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		/* Single-level LUN address, same encoding as queuecommand. */
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}
632
/*
 * SCSI EH callback: abort one command via an ABORT TASK TMF.  The tag
 * is the scsi_cmnd pointer, matching the tag set at submission time.
 */
static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
	};
	return virtscsi_tmf(vscsi, cmd);
}
656
657static int virtscsi_target_alloc(struct scsi_target *starget)
658{
659 struct virtio_scsi_target_state *tgt =
660 kmalloc(sizeof(*tgt), GFP_KERNEL);
661 if (!tgt)
662 return -ENOMEM;
663
664 spin_lock_init(&tgt->tgt_lock);
665 atomic_set(&tgt->reqs, 0);
666 tgt->req_vq = NULL;
667
668 starget->hostdata = tgt;
669 return 0;
670}
671
672static void virtscsi_target_destroy(struct scsi_target *starget)
673{
674 struct virtio_scsi_target_state *tgt = starget->hostdata;
675 kfree(tgt);
676}
677
/* Host template used when the device exposes a single request queue. */
static struct scsi_host_template virtscsi_host_template_single = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.queuecommand = virtscsi_queuecommand_single,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};
693
/* Host template for multiqueue devices; only queuecommand differs. */
static struct scsi_host_template virtscsi_host_template_multi = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.queuecommand = virtscsi_queuecommand_multi,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};
709
/* Read one field of struct virtio_scsi_config from the device. */
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})
718
/* Write one field of struct virtio_scsi_config on the device. */
#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})
726
/*
 * __virtscsi_set_affinity - apply or clear CPU affinity hints on the
 * request virtqueues.  Caller must hold the CPU hotplug read lock
 * (get_online_cpus) or be in the hotplug callback path.
 *
 * Hints are only useful with exactly one queue per online CPU.  When
 * that does not hold and affinity was requested: if hints are
 * currently set, fall through and clear them; otherwise nothing to do.
 */
static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	int i;
	int cpu;

	if ((vscsi->num_queues == 1 ||
	     vscsi->num_queues != num_online_cpus()) && affinity) {
		if (vscsi->affinity_hint_set)
			affinity = false;	/* downgrade to clearing */
		else
			return;
	}

	if (affinity) {
		/* One queue per online CPU, in enumeration order. */
		i = 0;
		for_each_online_cpu(cpu) {
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
			i++;
		}

		vscsi->affinity_hint_set = true;
	} else {
		/* -1 clears the affinity hint on a queue. */
		for (i = 0; i < vscsi->num_queues; i++)
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);

		vscsi->affinity_hint_set = false;
	}
}
760
/* Wrapper taking the CPU hotplug read lock around the affinity update. */
static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	get_online_cpus();
	__virtscsi_set_affinity(vscsi, affinity);
	put_online_cpus();
}
767
/*
 * CPU hotplug notifier: re-apply affinity hints whenever a CPU comes
 * online or goes away, so the queue<->CPU mapping stays current.
 * Calls the __ variant directly since the hotplug path already
 * excludes concurrent CPU changes.
 */
static int virtscsi_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
	switch(action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__virtscsi_set_affinity(vscsi, true);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
784
785static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
786 struct virtqueue *vq)
787{
788 spin_lock_init(&virtscsi_vq->vq_lock);
789 virtscsi_vq->vq = vq;
790}
791
/*
 * Deferred-scan callback, invoked by the virtio core once the device
 * is ready.  Uses virtio_scsi_host() for consistency with the rest of
 * the file instead of casting vdev->priv directly.
 */
static void virtscsi_scan(struct virtio_device *vdev)
{
	scsi_scan_host(virtio_scsi_host(vdev));
}
798
/*
 * Tear down all virtqueues: clear affinity hints, reset the device so
 * it stops using the rings, then delete them.
 */
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_set_affinity(vscsi, false);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}
811
/*
 * virtscsi_init - discover the virtqueues and finish device setup.
 *
 * Builds the callback/name tables (vq 0 = control, vq 1 = event, the
 * rest are request queues), finds the vqs, applies affinity hints,
 * programs CDB/sense sizes and, if hotplug is negotiated, posts the
 * event buffers.  On failure the vqs are torn down again.  Returns 0
 * or a negative errno.
 */
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_set_affinity(vscsi, true);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	err = 0;

out:
	/* The temporary tables are only needed during discovery. */
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}
870
871static int virtscsi_probe(struct virtio_device *vdev)
872{
873 struct Scsi_Host *shost;
874 struct virtio_scsi *vscsi;
875 int err;
876 u32 sg_elems, num_targets;
877 u32 cmd_per_lun;
878 u32 num_queues;
879 struct scsi_host_template *hostt;
880
881
882 num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
883
884 num_targets = virtscsi_config_get(vdev, max_target) + 1;
885
886 if (num_queues == 1)
887 hostt = &virtscsi_host_template_single;
888 else
889 hostt = &virtscsi_host_template_multi;
890
891 shost = scsi_host_alloc(hostt,
892 sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
893 if (!shost)
894 return -ENOMEM;
895
896 sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
897 shost->sg_tablesize = sg_elems;
898 vscsi = shost_priv(shost);
899 vscsi->vdev = vdev;
900 vscsi->num_queues = num_queues;
901 vdev->priv = shost;
902
903 err = virtscsi_init(vdev, vscsi);
904 if (err)
905 goto virtscsi_init_failed;
906
907 vscsi->nb.notifier_call = &virtscsi_cpu_callback;
908 err = register_hotcpu_notifier(&vscsi->nb);
909 if (err) {
910 pr_err("registering cpu notifier failed\n");
911 goto scsi_add_host_failed;
912 }
913
914 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
915 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
916 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
917
918
919
920
921 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
922 shost->max_id = num_targets;
923 shost->max_channel = 0;
924 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
925 err = scsi_add_host(shost, &vdev->dev);
926 if (err)
927 goto scsi_add_host_failed;
928
929
930
931
932 return 0;
933
934scsi_add_host_failed:
935 vdev->config->del_vqs(vdev);
936virtscsi_init_failed:
937 scsi_host_put(shost);
938 return err;
939}
940
/*
 * virtscsi_remove - tear down the adapter in reverse probe order:
 * flush event work, remove the host, drop the hotplug notifier, then
 * delete the virtqueues and release the host structure.
 */
static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

	unregister_hotcpu_notifier(&vscsi->nb);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}
956
957#ifdef CONFIG_PM
/* PM freeze: drop the virtqueues; restore rebuilds them. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	/* NOTE(review): the hotcpu notifier stays registered across
	 * freeze, so a CPU event while frozen touches dead vqs; later
	 * upstream kernels unregister it here and re-register in
	 * restore — confirm against the target kernel version. */
	virtscsi_remove_vqs(vdev);
	return 0;
}
963
/* PM restore: rebuild the virtqueues (also re-posts event buffers
 * inside virtscsi_init() when hotplug is negotiated). */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
}
971#endif
972
/* Bind to any virtio device advertising the SCSI device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
977
/* Feature bits we negotiate: hotplug events and parameter-change events. */
static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
};
982
/* Virtio bus driver glue. */
static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
	.scan = virtscsi_scan,		/* deferred host scan after probe */
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};
997
/*
 * Module init: create the command slab cache and mempool, then
 * register with the virtio bus.  On any failure, tear down whatever
 * was created (the NULL checks keep the unwind safe at each stage).
 */
static int __init init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}


	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}
1033
/* Module exit: unregister first so no new commands arrive, then free
 * the pool before the cache that backs it. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
/* Module entry points and metadata. */
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");
1046