qemu/pc-bios/s390-ccw/virtio.c
/*
 * Virtio driver bits
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-ccw.h"
#include "virtio.h"

static struct vring block;

static char chsc_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

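/*
 * Issue a KVM hypercall via DIAGNOSE 0x500: the function number is passed
 * in GPR 1, the parameters in GPRs 2 and 3, and the return value comes
 * back in GPR 2 (hence retval sharing a register with r_param1).
 */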
static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2)
{
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register long retval asm("2");

    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2)
                  : "memory", "cc");

    return retval;
}

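/* Notify the host that new buffers were queued on the device behind schid */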
static void virtio_notify(struct subchannel_id schid)
{
    kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid, 0);
}

/***********************************************
 *             Virtio functions                *
 ***********************************************/

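/*
 * Busy-poll the subchannel with TSCH until it becomes status pending and
 * the pending interrupt is cleared.  Returns 0 on clean completion
 * (channel end + device end, i.e. dstat 0x0c), -EIO on any channel or
 * device error.
 */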
static int drain_irqs(struct subchannel_id schid)
{
    struct irb irb = {};
    int r = 0;

    while (1) {
        /* FIXME: make use of TPI, for that enable subchannel and isc */
        if (tsch(schid, &irb)) {
            /* Might want to differentiate error codes later on. */
            if (irb.scsw.cstat) {
                r = -EIO;
            } else if (irb.scsw.dstat != 0xc) {
                r = -EIO;
            }
            return r;
        }
    }
}

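/*
 * Build a single format-1 CCW plus an ORB for the given command and data
 * buffer, start it with SSCH, and wait synchronously for completion by
 * draining the resulting interrupt.
 */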
static int run_ccw(struct subchannel_id schid, int cmd, void *ptr, int len)
{
    struct ccw1 ccw = {};
    struct cmd_orb orb = {};
    struct schib schib;
    int r;

    /* start command processing */
    stsch_err(schid, &schib);
    schib.scsw.ctrl = SCSW_FCTL_START_FUNC;
    msch(schid, &schib);

    /* start subchannel command */
    orb.fmt = 1;
    orb.cpa = (u32)(long)&ccw;
    orb.lpm = 0x80;

    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(schid, &orb);
    /*
     * XXX Wait until device is done processing the CCW. For now we can
     *     assume that a simple tsch will have finished the CCW processing,
     *     but the architecture allows for asynchronous operation
     */
    if (!r) {
        r = drain_irqs(schid);
    }
    return r;
}

static void virtio_set_status(struct subchannel_id schid,
                              unsigned long dev_addr)
{
    unsigned char status = dev_addr;
    if (run_ccw(schid, CCW_CMD_WRITE_STATUS, &status, sizeof(status))) {
        virtio_panic("Could not write status to host!\n");
    }
}

static void virtio_reset(struct subchannel_id schid)
{
    run_ccw(schid, CCW_CMD_VDEV_RESET, NULL, 0);
}

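/*
 * Lay out a (legacy) virtio ring in the buffer at p: the descriptor table
 * comes first, the avail ring right after it, and the used ring at the
 * next align-ed boundary; then reset all indices.
 */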
static void vring_init(struct vring *vr, unsigned int num, void *p,
                       unsigned long align)
{
    debug_print_addr("init p", p);
    vr->num = num;
    vr->desc = p;
    vr->avail = p + num * sizeof(struct vring_desc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align - 1)
                & ~(align - 1));

    /* Zero out all relevant fields */
    vr->avail->flags = 0;
    vr->avail->idx = 0;

    /* We're running with interrupts off anyway, so don't bother */
    vr->used->flags = VRING_USED_F_NO_NOTIFY;
    vr->used->idx = 0;
    vr->used_idx = 0;
    vr->next_idx = 0;

    debug_print_addr("init vr", vr);
}

static void vring_notify(struct subchannel_id schid)
{
    virtio_notify(schid);
}

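/*
 * Queue one buffer as a descriptor.  VRING_HIDDEN_IS_CHAIN is a purely
 * local flag: it marks a continuation of a descriptor chain, so only the
 * head of a chain is put into the avail ring, and avail->idx is bumped
 * only once the last descriptor (no VRING_DESC_F_NEXT) has been added.
 */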
static void vring_send_buf(struct vring *vr, void *p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
    }

    vr->desc[vr->next_idx].addr = (ulong)p;
    vr->desc[vr->next_idx].len = len;
    vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
    vr->desc[vr->next_idx].next = vr->next_idx;
    vr->desc[vr->next_idx].next++;
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx++;
    }
}

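/*
 * Read the s390 TOD clock with STCK.  Bit 51 of the TOD value ticks every
 * microsecond, so shifting right by 12 yields microseconds.
 */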
static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}

static ulong get_second(void)
{
    return (get_clock() >> 12) / 1000000;
}

/*
 * Wait for the host to reply.
 *
 * timeout is in seconds if > 0.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(struct vring *vr, int timeout)
{
    ulong target_second = get_second() + timeout;
    struct subchannel_id schid = vr->schid;
    int r = 0;

    /* Wait until the used index has moved. */
    while (vr->used->idx == vr->used_idx) {
        vring_notify(schid);
        if (timeout && (get_second() >= target_second)) {
            r = 1;
            break;
        }
        yield();
    }

    vr->used_idx = vr->used->idx;
    vr->next_idx = 0;
    vr->desc[0].len = 0;
    vr->desc[0].flags = 0;

    return r;
}

/***********************************************
 *               Virtio block                  *
 ***********************************************/

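/*
 * Read sec_num sectors starting at sector into load_addr, using the usual
 * three-descriptor virtio-blk request: request header, data buffer, and a
 * one-byte status field written by the host.
 */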
int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    struct virtio_blk_outhdr out_hdr;
    u8 status;
    int r;

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = virtio_sector_adjust(sector);

    vring_send_buf(&block, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(&block, load_addr, virtio_get_block_size() * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* status field */
    vring_send_buf(&block, &status, sizeof(u8), VRING_DESC_F_WRITE |
                   VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply(&block, 0);

    r = drain_irqs(block.schid);
    if (r) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }
    return status;
}

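/*
 * Load a run of contiguous sectors described by a boot record list entry:
 * rec_list1 is the start sector, the top halfword of rec_list2 is the
 * sector length and the halfword below it the sector count minus one.
 * Returns the address right behind the loaded data, or -1 if the sector
 * length does not match the device block size.
 */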
unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
                                 ulong subchan_id, void *load_addr)
{
    u8 status;
    int sec = rec_list1;
    int sec_num = ((rec_list2 >> 32) & 0xffff) + 1;
    int sec_len = rec_list2 >> 48;
    ulong addr = (ulong)load_addr;

    if (sec_len != virtio_get_block_size()) {
        return -1;
    }

    sclp_print(".");
    status = virtio_read_many(sec, (void *)addr, sec_num);
    if (status) {
        virtio_panic("I/O Error");
    }
    addr += sec_num * virtio_get_block_size();

    return addr;
}

int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}

static VirtioBlkConfig blk_cfg = {};
static bool guessed_disk_nature;

bool virtio_guessed_disk_nature(void)
{
    return guessed_disk_nature;
}

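/*
 * Fallback geometries used when the device configuration does not describe
 * a disk layout we recognize: assume a 512-byte-block SCSI disk or a
 * 4k-block ECKD DASD, respectively.
 */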
void virtio_assume_scsi(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 512;
    blk_cfg.physical_block_exp = 0;
}

void virtio_assume_eckd(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 4096;
    blk_cfg.physical_block_exp = 0;

    /* this must be here to calculate code segment position */
    blk_cfg.geometry.heads = 15;
    blk_cfg.geometry.sectors = 12;
}

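/*
 * A disk counts as SCSI-style if it reports the conventional LBA geometry
 * of 255 heads and 63 sectors per track with 512-byte blocks.
 */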
bool virtio_disk_is_scsi(void)
{
    if (guessed_disk_nature) {
        return (virtio_get_block_size() == 512);
    }
    return (blk_cfg.geometry.heads == 255)
        && (blk_cfg.geometry.sectors == 63)
        && (virtio_get_block_size() == 512);
}

/*
 * Other supported value pairs, if any, would need to be added here.
 * Note: head count is always 15.
 */
static inline u8 virtio_eckd_sectors_for_block_size(int size)
{
    switch (size) {
    case 512:
        return 49;
    case 1024:
        return 33;
    case 2048:
        return 21;
    case 4096:
        return 12;
    }
    return 0;
}

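/*
 * An ECKD DASD always reports 15 heads; the expected sectors-per-track
 * value depends on the block size (see the table above).
 */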
bool virtio_disk_is_eckd(void)
{
    const int block_size = virtio_get_block_size();

    if (guessed_disk_nature) {
        return (block_size == 4096);
    }
    return (blk_cfg.geometry.heads == 15)
        && (blk_cfg.geometry.sectors ==
            virtio_eckd_sectors_for_block_size(block_size));
}

bool virtio_ipl_disk_is_valid(void)
{
    return virtio_disk_is_scsi() || virtio_disk_is_eckd();
}

int virtio_get_block_size(void)
{
    return blk_cfg.blk_size << blk_cfg.physical_block_exp;
}

uint8_t virtio_get_heads(void)
{
    return blk_cfg.geometry.heads;
}

uint8_t virtio_get_sectors(void)
{
    return blk_cfg.geometry.sectors;
}

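/*
 * The virtio-blk capacity is given in 512-byte sectors (VIRTIO_SECTOR_SIZE);
 * convert it into the number of logical blocks of the device.
 */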
uint64_t virtio_get_blocks(void)
{
    return blk_cfg.capacity /
           (virtio_get_block_size() / VIRTIO_SECTOR_SIZE);
}

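/*
 * Bring up the virtio-blk device behind schid: reset it, read the queue
 * and device configuration, set up the ring in ring_area, hand the ring to
 * the host with CCW_CMD_SET_VQ, and finally announce DRIVER_OK.  If the
 * resulting configuration does not look like a bootable disk, fall back to
 * the SCSI defaults.
 */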
void virtio_setup_block(struct subchannel_id schid)
{
    struct vq_info_block info;
    struct vq_config_block config = {};

    blk_cfg.blk_size = 0; /* mark "illegal" - setup started... */
    guessed_disk_nature = false;

    virtio_reset(schid);

    /*
     * Skipping CCW_CMD_READ_FEAT. We're not doing anything fancy, and
     * we'll just stop dead anyway if anything does not work as we
     * expect it to.
     */

    config.index = 0;
    if (run_ccw(schid, CCW_CMD_READ_VQ_CONF, &config, sizeof(config))) {
        virtio_panic("Could not get block device VQ configuration\n");
    }
    if (run_ccw(schid, CCW_CMD_READ_CONF, &blk_cfg, sizeof(blk_cfg))) {
        virtio_panic("Could not get block device configuration\n");
    }
    vring_init(&block, config.num, ring_area,
               KVM_S390_VIRTIO_RING_ALIGN);

    info.queue = (unsigned long long) ring_area;
    info.align = KVM_S390_VIRTIO_RING_ALIGN;
    info.index = 0;
    info.num = config.num;
    block.schid = schid;

    if (!run_ccw(schid, CCW_CMD_SET_VQ, &info, sizeof(info))) {
        virtio_set_status(schid, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (!virtio_ipl_disk_is_valid()) {
        /* make sure all getters but blocksize return 0 for invalid IPL disk */
        memset(&blk_cfg, 0, sizeof(blk_cfg));
        virtio_assume_scsi();
    }
}

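/*
 * A virtio-ccw block device identifies itself through SENSE ID with
 * control unit type 0x3832 and the virtio device ID (VIRTIO_ID_BLOCK) as
 * control unit model.
 */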
bool virtio_is_blk(struct subchannel_id schid)
{
    int r;
    struct senseid senseid = {};

    /* run sense id command */
    r = run_ccw(schid, CCW_CMD_SENSE_ID, &senseid, sizeof(senseid));
    if (r) {
        return false;
    }
    if ((senseid.cu_type != 0x3832) || (senseid.cu_model != VIRTIO_ID_BLOCK)) {
        return false;
    }

    return true;
}

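/*
 * Enable the multiple subchannel set (MSS) facility via CHSC (command code
 * 0x0031, operation code 2).  A response code of 0x0001 indicates success.
 */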
int enable_mss_facility(void)
{
    int ret;
    struct chsc_area_sda *sda_area = (struct chsc_area_sda *) chsc_page;

    memset(sda_area, 0, PAGE_SIZE);
    sda_area->request.length = 0x0400;
    sda_area->request.code = 0x0031;
    sda_area->operation_code = 0x2;

    ret = chsc(sda_area);
    if ((ret == 0) && (sda_area->response.code == 0x0001)) {
        return 0;
    }
    return -EIO;
}