/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME         "sunvdc"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.0"
#define DRV_MODULE_RELDATE      "June 25, 2007"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE        256

#define WAITING_FOR_LINK_UP     0x01
#define WAITING_FOR_TX_SPACE    0x02
#define WAITING_FOR_GEN_CMD     0x04
#define WAITING_FOR_ANY         -1

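/* One request-tracking slot per TX descriptor ring entry; rq_arr in
 * struct vdc_port below uses the same index as the descriptor ring, so
 * a completed descriptor can be matched back to the struct request
 * that produced it (a NULL ->req marks a generic_request() slot).
 */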
struct vdc_req_entry {
        struct request          *req;
};

struct vdc_port {
        struct vio_driver_state vio;

        struct gendisk          *disk;

        struct vdc_completion   *cmp;

        u64                     req_id;
        u64                     seq;
        struct vdc_req_entry    rq_arr[VDC_TX_RING_SIZE];

        unsigned long           ring_cookies;

        u64                     max_xfer_size;
        u32                     vdisk_block_size;

        /* The server fills these in for us in the disk attribute
         * ACK packet.
         */
        u64                     operations;
        u32                     vdisk_size;
        u8                      vdisk_type;

        char                    disk_name[32];

        struct vio_disk_geom    geom;
        struct vio_disk_vtoc    label;
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
        return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
        { .major = 1, .minor = 0 },
};

#define VDCBLK_NAME     "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
        return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct gendisk *disk = bdev->bd_disk;
        struct vdc_port *port = disk->private_data;

        geo->heads = (u8) port->geom.num_hd;
        geo->sectors = (u8) port->geom.num_sec;
        geo->cylinders = port->geom.num_cyl;

        return 0;
}

static const struct block_device_operations vdc_fops = {
        .owner          = THIS_MODULE,
        .getgeo         = vdc_getgeo,
};

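/* Wake whoever is sleeping on port->vio.cmp, but only if the event that
 * just happened matches what the sleeper declared in ->waiting_for
 * (WAITING_FOR_ANY matches everything and is used on fatal errors).
 */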
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
        if (vio->cmp &&
            (waiting_for == -1 ||
             vio->cmp->waiting_for == waiting_for)) {
                vio->cmp->err = err;
                complete(&vio->cmp->com);
                vio->cmp = NULL;
        }
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
        vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
        struct vio_msg_tag *pkt = arg;

        printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
               pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
        printk(KERN_ERR PFX "Resetting connection.\n");

        ldc_disconnect(port->vio.lp);

        return -ECONNRESET;
}

static int vdc_send_attr(struct vio_driver_state *vio)
{
        struct vdc_port *port = to_vdc_port(vio);
        struct vio_disk_attr_info pkt;

        memset(&pkt, 0, sizeof(pkt));

        pkt.tag.type = VIO_TYPE_CTRL;
        pkt.tag.stype = VIO_SUBTYPE_INFO;
        pkt.tag.stype_env = VIO_ATTR_INFO;
        pkt.tag.sid = vio_send_sid(vio);

        pkt.xfer_mode = VIO_DRING_MODE;
        pkt.vdisk_block_size = port->vdisk_block_size;
        pkt.max_xfer_size = port->max_xfer_size;

        viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
               pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

        return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
        struct vdc_port *port = to_vdc_port(vio);
        struct vio_disk_attr_info *pkt = arg;

        viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
               "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
               pkt->tag.stype, pkt->operations,
               pkt->vdisk_size, pkt->vdisk_type,
               pkt->xfer_mode, pkt->vdisk_block_size,
               pkt->max_xfer_size);

        if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
                switch (pkt->vdisk_type) {
                case VD_DISK_TYPE_DISK:
                case VD_DISK_TYPE_SLICE:
                        break;

                default:
                        printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
                               vio->name, pkt->vdisk_type);
                        return -ECONNRESET;
                }

                if (pkt->vdisk_block_size > port->vdisk_block_size) {
                        printk(KERN_ERR PFX "%s: BLOCK size increased "
                               "%u --> %u\n",
                               vio->name,
                               port->vdisk_block_size, pkt->vdisk_block_size);
                        return -ECONNRESET;
                }

                port->operations = pkt->operations;
                port->vdisk_size = pkt->vdisk_size;
                port->vdisk_type = pkt->vdisk_type;
                if (pkt->max_xfer_size < port->max_xfer_size)
                        port->max_xfer_size = pkt->max_xfer_size;
                port->vdisk_block_size = pkt->vdisk_block_size;
                return 0;
        } else {
                printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

                return -ECONNRESET;
        }
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
        int err = desc->status;

        vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

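/* Retire one finished TX ring descriptor: unmap its LDC cookies, advance
 * the consumer index, and either complete the block-layer request that
 * owned the slot or, if no request was attached, treat it as a
 * generic_request() command and wake that waiter instead.  The queue is
 * restarted in case __send_request() had stopped it on ring exhaustion.
 */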
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
                        unsigned int index)
{
        struct vio_disk_desc *desc = vio_dring_entry(dr, index);
        struct vdc_req_entry *rqe = &port->rq_arr[index];
        struct request *req;

        if (unlikely(desc->hdr.state != VIO_DESC_DONE))
                return;

        ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
        desc->hdr.state = VIO_DESC_FREE;
        dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

        req = rqe->req;
        if (req == NULL) {
                vdc_end_special(port, desc);
                return;
        }

        rqe->req = NULL;

        __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

        if (blk_queue_stopped(port->disk->queue))
                blk_start_queue(port->disk->queue);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct vio_dring_data *pkt = msgbuf;

        if (unlikely(pkt->dring_ident != dr->ident ||
                     pkt->start_idx != pkt->end_idx ||
                     pkt->start_idx >= VDC_TX_RING_SIZE))
                return 0;

        vdc_end_one(port, dr, pkt->start_idx);

        return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
        /* XXX Implement me XXX */
        return 0;
}

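/* LDC event callback, invoked for link state changes and incoming data.
 * Under vio->lock it drains every pending message with ldc_read() and
 * dispatches it: DATA ACKs/NACKs go to the handlers above, CTRL packets
 * go to the generic VIO handshake engine, anything else resets the
 * connection.  On error, any sleeper is woken via vdc_finish().
 */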
static void vdc_event(void *arg, int event)
{
        struct vdc_port *port = arg;
        struct vio_driver_state *vio = &port->vio;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&vio->lock, flags);

        if (unlikely(event == LDC_EVENT_RESET ||
                     event == LDC_EVENT_UP)) {
                vio_link_state_change(vio, event);
                spin_unlock_irqrestore(&vio->lock, flags);
                return;
        }

        if (unlikely(event != LDC_EVENT_DATA_READY)) {
                printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
                spin_unlock_irqrestore(&vio->lock, flags);
                return;
        }

        err = 0;
        while (1) {
                union {
                        struct vio_msg_tag tag;
                        u64 raw[8];
                } msgbuf;

                err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
                if (unlikely(err < 0)) {
                        if (err == -ECONNRESET)
                                vio_conn_reset(vio);
                        break;
                }
                if (err == 0)
                        break;
                viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
                       msgbuf.tag.type,
                       msgbuf.tag.stype,
                       msgbuf.tag.stype_env,
                       msgbuf.tag.sid);
                err = vio_validate_sid(vio, &msgbuf.tag);
                if (err < 0)
                        break;

                if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
                        if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
                                err = vdc_ack(port, &msgbuf);
                        else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
                                err = vdc_nack(port, &msgbuf);
                        else
                                err = vdc_handle_unknown(port, &msgbuf);
                } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
                        err = vio_control_pkt_engine(vio, &msgbuf);
                } else {
                        err = vdc_handle_unknown(port, &msgbuf);
                }
                if (err < 0)
                        break;
        }
        if (err < 0)
                vdc_finish(&port->vio, err, WAITING_FOR_ANY);
        spin_unlock_irqrestore(&vio->lock, flags);
}

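/* Notify the server that descriptor dr->prod is ready.  Retries while
 * vio_ldc_send() returns -EAGAIN, with an exponential backoff capped at
 * 128 microseconds per iteration.
 */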
static int __vdc_tx_trigger(struct vdc_port *port)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct vio_dring_data hdr = {
                .tag = {
                        .type           = VIO_TYPE_DATA,
                        .stype          = VIO_SUBTYPE_INFO,
                        .stype_env      = VIO_DRING_DATA,
                        .sid            = vio_send_sid(&port->vio),
                },
                .dring_ident            = dr->ident,
                .start_idx              = dr->prod,
                .end_idx                = dr->prod,
        };
        int err, delay;

        hdr.seq = dr->snd_nxt;
        delay = 1;
        do {
                err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
                if (err > 0) {
                        dr->snd_nxt++;
                        break;
                }
                udelay(delay);
                if ((delay <<= 1) > 128)
                        delay = 128;
        } while (err == -EAGAIN);

        return err;
}

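/* Translate one block-layer request into a single TX ring descriptor:
 * map its scatterlist through the LDC layer, fill in the vio_disk_desc
 * (operation, offset in vdisk blocks, total length, cookie count), then
 * publish it with a write barrier and kick the server.  Called with
 * port->vio.lock held from the request_fn below.
 */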
static int __send_request(struct request *req)
{
        struct vdc_port *port = req->rq_disk->private_data;
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct scatterlist sg[port->ring_cookies];
        struct vdc_req_entry *rqe;
        struct vio_disk_desc *desc;
        unsigned int map_perm;
        int nsg, err, i;
        u64 len;
        u8 op;

        map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

        if (rq_data_dir(req) == READ) {
                map_perm |= LDC_MAP_W;
                op = VD_OP_BREAD;
        } else {
                map_perm |= LDC_MAP_R;
                op = VD_OP_BWRITE;
        }

        sg_init_table(sg, port->ring_cookies);
        nsg = blk_rq_map_sg(req->q, req, sg);

        len = 0;
        for (i = 0; i < nsg; i++)
                len += sg[i].length;

        if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
                blk_stop_queue(port->disk->queue);
                err = -ENOMEM;
                goto out;
        }

        desc = vio_dring_cur(dr);

        err = ldc_map_sg(port->vio.lp, sg, nsg,
                         desc->cookies, port->ring_cookies,
                         map_perm);
        if (err < 0) {
                printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
                return err;
        }

        rqe = &port->rq_arr[dr->prod];
        rqe->req = req;

        desc->hdr.ack = VIO_ACK_ENABLE;
        desc->req_id = port->req_id;
        desc->operation = op;
        if (port->vdisk_type == VD_DISK_TYPE_DISK) {
                desc->slice = 0xff;
        } else {
                desc->slice = 0;
        }
        desc->status = ~0;
        desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
        desc->size = len;
        desc->ncookies = err;

        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
        wmb();
        desc->hdr.state = VIO_DESC_READY;

        err = __vdc_tx_trigger(port);
        if (err < 0) {
                printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
        } else {
                port->req_id++;
                dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
        }
out:

        return err;
}

static void do_vdc_request(struct request_queue *q)
{
        while (1) {
                struct request *req = blk_fetch_request(q);

                if (!req)
                        break;

                if (__send_request(req) < 0)
                        __blk_end_request_all(req, -EIO);
        }
}

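/* Issue a synchronous, single-buffer disk operation (VTOC, geometry,
 * write-cache control, ...) through one TX ring descriptor.  The payload
 * is bounced via a kzalloc()'d buffer so it can be LDC-mapped, and the
 * caller sleeps on a vio_completion that vdc_end_special() completes
 * when the descriptor finishes.
 */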
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
        struct vio_dring_state *dr;
        struct vio_completion comp;
        struct vio_disk_desc *desc;
        unsigned int map_perm;
        unsigned long flags;
        int op_len, err;
        void *req_buf;

        if (!(((u64)1 << ((u64)op - 1)) & port->operations))
                return -EOPNOTSUPP;

        switch (op) {
        case VD_OP_BREAD:
        case VD_OP_BWRITE:
        default:
                return -EINVAL;

        case VD_OP_FLUSH:
                op_len = 0;
                map_perm = 0;
                break;

        case VD_OP_GET_WCE:
                op_len = sizeof(u32);
                map_perm = LDC_MAP_W;
                break;

        case VD_OP_SET_WCE:
                op_len = sizeof(u32);
                map_perm = LDC_MAP_R;
                break;

        case VD_OP_GET_VTOC:
                op_len = sizeof(struct vio_disk_vtoc);
                map_perm = LDC_MAP_W;
                break;

        case VD_OP_SET_VTOC:
                op_len = sizeof(struct vio_disk_vtoc);
                map_perm = LDC_MAP_R;
                break;

        case VD_OP_GET_DISKGEOM:
                op_len = sizeof(struct vio_disk_geom);
                map_perm = LDC_MAP_W;
                break;

        case VD_OP_SET_DISKGEOM:
                op_len = sizeof(struct vio_disk_geom);
                map_perm = LDC_MAP_R;
                break;

        case VD_OP_SCSICMD:
                op_len = 16;
                map_perm = LDC_MAP_RW;
                break;

        case VD_OP_GET_DEVID:
                op_len = sizeof(struct vio_disk_devid);
                map_perm = LDC_MAP_W;
                break;

        case VD_OP_GET_EFI:
        case VD_OP_SET_EFI:
                return -EOPNOTSUPP;
        }

        map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

        op_len = (op_len + 7) & ~7;
        req_buf = kzalloc(op_len, GFP_KERNEL);
        if (!req_buf)
                return -ENOMEM;

        if (len > op_len)
                len = op_len;

        if (map_perm & LDC_MAP_R)
                memcpy(req_buf, buf, len);

        spin_lock_irqsave(&port->vio.lock, flags);

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

        /* XXX If we want to use this code generically we have to
         * XXX handle TX ring exhaustion etc.
         */
        desc = vio_dring_cur(dr);

        err = ldc_map_single(port->vio.lp, req_buf, op_len,
                             desc->cookies, port->ring_cookies,
                             map_perm);
        if (err < 0) {
                spin_unlock_irqrestore(&port->vio.lock, flags);
                kfree(req_buf);
                return err;
        }

        init_completion(&comp.com);
        comp.waiting_for = WAITING_FOR_GEN_CMD;
        port->vio.cmp = &comp;

        desc->hdr.ack = VIO_ACK_ENABLE;
        desc->req_id = port->req_id;
        desc->operation = op;
        desc->slice = 0;
        desc->status = ~0;
        desc->offset = 0;
        desc->size = op_len;
        desc->ncookies = err;

        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
        wmb();
        desc->hdr.state = VIO_DESC_READY;

        err = __vdc_tx_trigger(port);
        if (err >= 0) {
                port->req_id++;
                dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
                spin_unlock_irqrestore(&port->vio.lock, flags);

                wait_for_completion(&comp.com);
                err = comp.err;
        } else {
                port->vio.cmp = NULL;
                spin_unlock_irqrestore(&port->vio.lock, flags);
        }

        if (map_perm & LDC_MAP_W)
                memcpy(buf, req_buf, len);

        kfree(req_buf);

        return err;
}

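/* Export the TX descriptor ring to the server.  Each of the
 * VDC_TX_RING_SIZE entries carries ring_cookies trailing LDC cookies,
 * which also bounds how many scatterlist segments one request may map.
 */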
static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        unsigned long len, entry_size;
        int ncookies;
        void *dring;

        entry_size = sizeof(struct vio_disk_desc) +
                (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
        len = (VDC_TX_RING_SIZE * entry_size);

        ncookies = VIO_MAX_RING_COOKIES;
        dring = ldc_alloc_exp_dring(port->vio.lp, len,
                                    dr->cookies, &ncookies,
                                    (LDC_MAP_SHADOW |
                                     LDC_MAP_DIRECT |
                                     LDC_MAP_RW));
        if (IS_ERR(dring))
                return PTR_ERR(dring);

        dr->base = dring;
        dr->entry_size = entry_size;
        dr->num_entries = VDC_TX_RING_SIZE;
        dr->prod = dr->cons = 0;
        dr->pending = VDC_TX_RING_SIZE;
        dr->ncookies = ncookies;

        return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

        if (dr->base) {
                ldc_free_exp_dring(port->vio.lp, dr->base,
                                   (dr->entry_size * dr->num_entries),
                                   dr->cookies, dr->ncookies);
                dr->base = NULL;
                dr->entry_size = 0;
                dr->num_entries = 0;
                dr->pending = 0;
                dr->ncookies = 0;
        }
}

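/* Bring the VIO link up and block until the handshake finishes, fetch
 * the VTOC and disk geometry from the server, then register a request
 * queue and gendisk for the virtual disk.  Capacity is computed from
 * the reported geometry (cylinders * heads * sectors).
 */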
static int probe_disk(struct vdc_port *port)
{
        struct vio_completion comp;
        struct request_queue *q;
        struct gendisk *g;
        int err;

        init_completion(&comp.com);
        comp.err = 0;
        comp.waiting_for = WAITING_FOR_LINK_UP;
        port->vio.cmp = &comp;

        vio_port_up(&port->vio);

        wait_for_completion(&comp.com);
        if (comp.err)
                return comp.err;

        err = generic_request(port, VD_OP_GET_VTOC,
                              &port->label, sizeof(port->label));
        if (err < 0) {
                printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
                return err;
        }

        err = generic_request(port, VD_OP_GET_DISKGEOM,
                              &port->geom, sizeof(port->geom));
        if (err < 0) {
                printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
                       "error %d\n", err);
                return err;
        }

        port->vdisk_size = ((u64)port->geom.num_cyl *
                            (u64)port->geom.num_hd *
                            (u64)port->geom.num_sec);

        q = blk_init_queue(do_vdc_request, &port->vio.lock);
        if (!q) {
                printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
                       port->vio.name);
                return -ENOMEM;
        }
        g = alloc_disk(1 << PARTITION_SHIFT);
        if (!g) {
                printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
                       port->vio.name);
                blk_cleanup_queue(q);
                return -ENOMEM;
        }

        port->disk = g;

        blk_queue_max_hw_segments(q, port->ring_cookies);
        blk_queue_max_phys_segments(q, port->ring_cookies);
        blk_queue_max_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
        g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
        strcpy(g->disk_name, port->disk_name);

        g->fops = &vdc_fops;
        g->queue = q;
        g->private_data = port;
        g->driverfs_dev = &port->vio.vdev->dev;

        set_capacity(g, port->vdisk_size);

        printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
               g->disk_name,
               port->vdisk_size, (port->vdisk_size >> (20 - 9)));

        add_disk(g);

        return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
        .event          = vdc_event,
        .mtu            = 64,
        .mode           = LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
        .send_attr              = vdc_send_attr,
        .handle_attr            = vdc_handle_attr,
        .handshake_complete     = vdc_handshake_complete,
};

static void __devinit print_version(void)
{
        static int version_printed;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);
}

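/* Per-port probe: validate that the device number fits in the minor
 * space, pick a disk name ("vdiska" .. "vdiskz", then "vdiskaa" ...),
 * size transfers at 128KB with 512-byte blocks, and allocate the LDC
 * channel and TX ring before starting the handshake in probe_disk().
 */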
static int __devinit vdc_port_probe(struct vio_dev *vdev,
                                    const struct vio_device_id *id)
{
        struct mdesc_handle *hp;
        struct vdc_port *port;
        int err;

        print_version();

        hp = mdesc_grab();

        err = -ENODEV;
        if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
                printk(KERN_ERR PFX "Port id [%llu] too large.\n",
                       vdev->dev_no);
                goto err_out_release_mdesc;
        }

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        err = -ENOMEM;
        if (!port) {
                printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
                goto err_out_release_mdesc;
        }

        if (vdev->dev_no >= 26)
                snprintf(port->disk_name, sizeof(port->disk_name),
                         VDCBLK_NAME "%c%c",
                         'a' + ((int)vdev->dev_no / 26) - 1,
                         'a' + ((int)vdev->dev_no % 26));
        else
                snprintf(port->disk_name, sizeof(port->disk_name),
                         VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));

        err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
                              vdc_versions, ARRAY_SIZE(vdc_versions),
                              &vdc_vio_ops, port->disk_name);
        if (err)
                goto err_out_free_port;

        port->vdisk_block_size = 512;
        port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
        port->ring_cookies = ((port->max_xfer_size *
                               port->vdisk_block_size) / PAGE_SIZE) + 2;

        err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
        if (err)
                goto err_out_free_port;

        err = vdc_alloc_tx_ring(port);
        if (err)
                goto err_out_free_ldc;

        err = probe_disk(port);
        if (err)
                goto err_out_free_tx_ring;

        dev_set_drvdata(&vdev->dev, port);

        mdesc_release(hp);

        return 0;

err_out_free_tx_ring:
        vdc_free_tx_ring(port);

err_out_free_ldc:
        vio_ldc_free(&port->vio);

err_out_free_port:
        kfree(port);

err_out_release_mdesc:
        mdesc_release(hp);
        return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
        struct vdc_port *port = dev_get_drvdata(&vdev->dev);

        if (port) {
                del_timer_sync(&port->vio.timer);

                vdc_free_tx_ring(port);
                vio_ldc_free(&port->vio);

                dev_set_drvdata(&vdev->dev, NULL);

                kfree(port);
        }
        return 0;
}

static const struct vio_device_id vdc_port_match[] = {
        {
                .type = "vdc-port",
        },
        {},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
        .id_table       = vdc_port_match,
        .probe          = vdc_port_probe,
        .remove         = vdc_port_remove,
        .driver         = {
                .name   = "vdc_port",
                .owner  = THIS_MODULE,
        }
};

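/* Module init/exit: grab a dynamically assigned block major for the
 * "vdisk" name, then register the vio driver so vdc_port_probe() runs
 * for each matching "vdc-port" virtual device.
 */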
static int __init vdc_init(void)
{
        int err;

        err = register_blkdev(0, VDCBLK_NAME);
        if (err < 0)
                goto out_err;

        vdc_major = err;

        err = vio_register_driver(&vdc_port_driver);
        if (err)
                goto out_unregister_blkdev;

        return 0;

out_unregister_blkdev:
        unregister_blkdev(vdc_major, VDCBLK_NAME);
        vdc_major = 0;

out_err:
        return err;
}

static void __exit vdc_exit(void)
{
        vio_unregister_driver(&vdc_port_driver);
        unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);