/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Author: David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2"
#define DRV_MODULE_RELDATE "November 24, 2014"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512

#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1

#define VDC_MAX_RETRIES 10

static struct workqueue_struct *sunvdc_wq;

struct vdc_req_entry {
	struct request *req;
};

struct vdc_port {
	struct vio_driver_state vio;

	struct gendisk *disk;

	struct vdc_completion *cmp;

	u64 req_id;
	u64 seq;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];

	unsigned long ring_cookies;

	u64 max_xfer_size;
	u32 vdisk_block_size;
	u32 drain;

	u64 ldc_timeout;
	struct delayed_work ldc_reset_timer_work;
	struct work_struct ldc_reset_work;

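	/* Filled in for us by the server during the disk attribute
	 * handshake (see vdc_handle_attr()).
	 */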
	u64 operations;
	u32 vdisk_size;
	u8 vdisk_type;
	u8 vdisk_mtype;
	u32 vdisk_phys_blksz;

	struct blk_mq_tag_set tag_set;

	char disk_name[32];
};

static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer_work(struct work_struct *work);

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

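/* Ordered from highest to lowest supported protocol version. */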
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 2 },
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};

static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
{
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}

#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	geo->heads = 0xff;
	geo->sectors = 0x3f;
	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;

	return 0;
}

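/* Minimal CD-ROM ioctl support: CDROMMULTISESSION always reports no
 * multisession data, and CDROM_GET_CAPABILITY succeeds only for
 * GENHD_FL_CD disks, so userspace can identify virtual CD/DVD media.
 */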
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned command, unsigned long argument)
{
	int i;
	struct gendisk *disk;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		disk = bdev->bd_disk;

		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
			return 0;
		return -EINVAL;

	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}

static const struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,
	.ioctl = vdc_ioctl,
};

static void vdc_blk_queue_start(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

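	/* Restart the blk-mq queue once the ring is at least half empty.
	 * This is also called when the handshake completes, which can happen
	 * before the disk has been allocated, hence the port->disk check.
	 */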
	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_mq_start_hw_queues(port->disk->queue);
}

static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);

	cancel_delayed_work(&port->ldc_reset_timer_work);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased %u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
		if (vdc_version_supported(port, 1, 2))
			port->vdisk_phys_blksz = pkt->phys_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);

	vdc_blk_queue_start(port);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
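	/* NACKed dring data messages are currently ignored. */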
	return 0;
}

static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		queue_work(sunvdc_wq, &port->ldc_reset_work);
		goto out;
	}

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		goto out;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);
		goto out;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
	spin_unlock_irqrestore(&vio->lock, flags);
}

static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = dr->prod,
		.end_idx = dr->prod,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VDC_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
		vdc_ldc_reset(port);
	return err;
}

static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

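	/* This has to be a non-SMP write barrier because we are writing to
	 * descriptor ring memory that is shared with the peer LDOM.
	 */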
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
	}

	return err;
}

static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct vdc_port *port = hctx->queue->queuedata;
	struct vio_dring_state *dr;
	unsigned long flags;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	blk_mq_start_request(bd->rq);

	spin_lock_irqsave(&port->vio.lock, flags);

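	/* Doing drain: just end new requests with an I/O error. */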
	if (unlikely(port->drain)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	if (__send_request(bd->rq) < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);
	return BLK_STS_OK;
}

static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << (u64)op) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

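	/* Note: no check for TX ring exhaustion here; this path assumes a
	 * free descriptor is available.
	 */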
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

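	/* This has to be a non-SMP write barrier because we are writing to
	 * descriptor ring memory that is shared with the peer LDOM.
	 */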
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

static int vdc_port_up(struct vdc_port *port)
{
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);
	return comp.err;
}

static void vdc_port_down(struct vdc_port *port)
{
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);
}

static const struct blk_mq_ops vdc_mq_ops = {
	.queue_rq = vdc_queue_rq,
};

static void cleanup_queue(struct request_queue *q)
{
	struct vdc_port *port = q->queuedata;

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&port->tag_set);
}

static struct request_queue *init_queue(struct vdc_port *port)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return q;

	q->queuedata = port;
	return q;
}

static int probe_disk(struct vdc_port *port)
{
	struct request_queue *q;
	struct gendisk *g;
	int err;

	err = vdc_port_up(port);
	if (err)
		return err;

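	/* With protocol 1.2 the server reports the physical block size during
	 * the attribute handshake; if it is still zero here the disk cannot
	 * be used.
	 */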
	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
		return -ENODEV;

	if (vdc_version_supported(port, 1, 1)) {
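		/* vdisk_size is only supplied by the server for protocol 1.1
		 * and later; it is still -1 if it was never set during the
		 * handshake.
		 */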
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns error %d\n",
			       err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	q = init_queue(port);
	if (IS_ERR(q)) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return PTR_ERR(q);
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

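	/* Each segment in a request is up to an aligned page in size. */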
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	blk_queue_physical_block_size(q, port->vdisk_phys_blksz);

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	device_add_disk(&port->vio.vdev->dev, g);

	return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event = vdc_event,
	.mtu = 64,
	.mode = LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr = vdc_send_attr,
	.handle_attr = vdc_handle_attr,
	.handshake_complete = vdc_handshake_complete,
};

static void print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

struct vdc_check_port_data {
	int dev_no;
	char *type;
};

static int vdc_device_probed(struct device *dev, void *arg)
{
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vdc_check_port_data *port_data;

	port_data = (struct vdc_check_port_data *)arg;

	if ((vdev->dev_no == port_data->dev_no) &&
	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
	    dev_get_drvdata(dev)) {
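		/* This device has already been configured by vdc_port_probe(). */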
		return 1;
	} else {
		return 0;
	}
}

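/* Determine whether the VIO device is part of an mpgroup by checking whether
 * another virtual-device-port node with the same dev_no and type under the
 * same parent has already been probed (i.e. has driver data set).
 *
 * Returns true if such a port exists, meaning this port is a redundant path
 * to a disk that is already configured.
 */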
static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
{
	struct vdc_check_port_data port_data;
	struct device *dev;

	port_data.dev_no = vdev->dev_no;
	port_data.type = (char *)&vdev->type;

	dev = device_find_child(vdev->dev.parent, &port_data,
				vdc_device_probed);

	if (dev)
		return true;

	return false;
}

static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;
	const u64 *ldc_timeout;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

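	/* Skip ports that are just additional paths (mpgroup members) to an
	 * already-probed virtual disk.
	 */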
	if (vdc_port_mpgroup_check(vdev)) {
		printk(KERN_WARNING
		       "VIO: Ignoring extra vdisk port %s\n",
		       dev_name(&vdev->dev));
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
	port->vdisk_size = -1;

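	/* The machine description property "vdc-timeout" (in seconds) bounds
	 * how long we wait for the LDC link to come back after a reset before
	 * draining the request queue; 0 means wait forever.
	 */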
	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
	INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

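	/* Note that the device driver data is used by vdc_device_probed() to
	 * detect already-configured ports, so only set it once the disk has
	 * been fully probed.
	 */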
	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		blk_mq_stop_hw_queues(port->disk->queue);

		flush_work(&port->ldc_reset_work);
		cancel_delayed_work_sync(&port->ldc_reset_timer_work);
		del_timer_sync(&port->vio.timer);

		del_gendisk(port->disk);
		cleanup_queue(port->disk->queue);
		put_disk(port->disk);
		port->disk = NULL;

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static void vdc_requeue_inflight(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	u32 idx;

	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
		struct vdc_req_entry *rqe = &port->rq_arr[idx];
		struct request *req;

		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
		desc->hdr.state = VIO_DESC_FREE;
		dr->cons = vio_dring_next(dr, idx);

		req = rqe->req;
		if (req == NULL) {
			vdc_end_special(port, desc);
			continue;
		}

		rqe->req = NULL;
		blk_mq_requeue_request(req, false);
	}
}

static void vdc_queue_drain(struct vdc_port *port)
{
	struct request_queue *q = port->disk->queue;

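	/* Mark the queue as draining and drop vio.lock so that new requests
	 * fail fast in vdc_queue_rq() while we freeze and quiesce the queue;
	 * freeze/quiesce can sleep and must not be done under the lock.
	 */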
	port->drain = 1;
	spin_unlock_irq(&port->vio.lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	spin_lock_irq(&port->vio.lock);
	port->drain = 0;
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

static void vdc_ldc_reset_timer_work(struct work_struct *work)
{
	struct vdc_port *port;
	struct vio_driver_state *vio;

	port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
	vio = &port->vio;

	spin_lock_irq(&vio->lock);
	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
			port->disk_name, port->ldc_timeout);
		vdc_queue_drain(port);
		vdc_blk_queue_start(port);
	}
	spin_unlock_irq(&vio->lock);
}

static void vdc_ldc_reset_work(struct work_struct *work)
{
	struct vdc_port *port;
	struct vio_driver_state *vio;
	unsigned long flags;

	port = container_of(work, struct vdc_port, ldc_reset_work);
	vio = &port->vio;

	spin_lock_irqsave(&vio->lock, flags);
	vdc_ldc_reset(port);
	spin_unlock_irqrestore(&vio->lock, flags);
}

static void vdc_ldc_reset(struct vdc_port *port)
{
	int err;

	assert_spin_locked(&port->vio.lock);

	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
	blk_mq_stop_hw_queues(port->disk->queue);
	vdc_requeue_inflight(port);
	vdc_port_down(port);

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err) {
		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
		return;
	}

	err = vdc_alloc_tx_ring(port);
	if (err) {
		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
		goto err_free_ldc;
	}

	if (port->ldc_timeout)
		mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
				 round_jiffies(jiffies + HZ * port->ldc_timeout));
	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
	return;

err_free_ldc:
	vio_ldc_free(&port->vio);
}

static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table = vdc_port_match,
	.probe = vdc_port_probe,
	.remove = vdc_port_remove,
	.name = "vdc_port",
};

static int __init vdc_init(void)
{
	int err;

	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
	if (!sunvdc_wq)
		return -ENOMEM;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_free_wq;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_free_wq:
	destroy_workqueue(sunvdc_wq);
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	destroy_workqueue(sunvdc_wq);
}

module_init(vdc_init);
module_exit(vdc_exit);