qemu/hw/s390x/s390-pci-inst.c
/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "exec/memop.h"
#include "exec/memory.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "hw/pci/pci_device.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/s390x/tod.h"

#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST  0
#endif

#define DPRINTF(fmt, ...)                                          \
    do {                                                           \
        if (DEBUG_S390PCI_INST) {                                  \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
        }                                                          \
    } while (0)

static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail++;
    }
}

static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail--;
    }
}

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

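/*
 * Fill the CLP List PCI response in @rrb with one entry per available
 * zpci function, starting from the request's resume token (if any).
 */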
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[i].vendor_id),
                lduw_p(&rrb->response.fh_list[i].device_id),
                ldl_p(&rrb->response.fh_list[i].fid),
                ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

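/*
 * CLP (Call Logical Processor) instruction handler: copy the request from
 * guest memory, dispatch on the CLP command (list/set/query function or
 * query function group) and write the response back to the guest.
 */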
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            /*
             * Take this opportunity to make sure we still have an accurate
             * host fh.  It's possible part of the handle changed while the
             * device was disabled to the guest (e.g. vfio hot reset for
             * ISM during plug)
             */
            if (pbdev->interp) {
                /* Take this opportunity to make sure we are sync'd with host */
                if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
                    !(pbdev->fh & FH_MASK_ENABLE)) {
                    stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                    goto out;
                }
            }
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_cold_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        stq_p(&resquery->sdma, pbdev->zpci_fn.sdma);
        stq_p(&resquery->edma, pbdev->zpci_fn.edma);
        stw_p(&resquery->pchid, pbdev->zpci_fn.pchid);
        stw_p(&resquery->vfn, pbdev->zpci_fn.vfn);
        resquery->flags = pbdev->zpci_fn.flags;
        resquery->pfgid = pbdev->zpci_fn.pfgid;
        resquery->pft = pbdev->zpci_fn.pft;
        resquery->fmbl = pbdev->zpci_fn.fmbl;
        stl_p(&resquery->fid, pbdev->zpci_fn.fid);
        stl_p(&resquery->uid, pbdev->zpci_fn.uid);
        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /* We do not allow access to unknown groups */
            /* The group must have been obtained with a vfio device */
            stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        resgrp->fr = group->zpci_group.fr;
        stq_p(&resgrp->dasm, group->zpci_group.dasm);
        stq_p(&resgrp->msia, group->zpci_group.msia);
        stw_p(&resgrp->mui, group->zpci_group.mui);
        stw_p(&resgrp->i, group->zpci_group.i);
        stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
        resgrp->version = group->zpci_group.version;
        resgrp->dtsm = group->zpci_group.dtsm;
        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}

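/*
 * Find the subregion of @mr that fully contains [offset, offset + len);
 * return @mr itself if no such subregion exists.
 */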
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

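/* Read @len bytes at @offset from the memory region backing BAR @pcias. */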
static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data,
                                       size_memop(len) | MO_BE,
                                       MEMTXATTRS_UNSPECIFIED);
}

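/*
 * PCI Load (PCILG) instruction handler: read up to 8 bytes from a BAR or
 * from the configuration space of the function addressed by the handle in
 * r2 and return the value in r1.
 */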
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data =  pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        DPRINTF("pcilg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

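/* Write @len bytes of @data at @offset to the region backing BAR @pcias. */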
static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data,
                                        size_memop(len) | MO_BE,
                                        MEMTXATTRS_UNSPECIFIED);
}

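/*
 * PCI Store (PCISTG) instruction handler: write up to 8 bytes from r1 to a
 * BAR or to the configuration space of the function addressed by the
 * handle in r2.
 */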
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
        /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

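/*
 * Update the cached IOTLB entry for @entry->iova and notify the IOMMU
 * notifiers about any resulting map or unmap; return the number of DMA
 * mappings still available (or 1 if no limit is in effect).
 */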
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEvent event = {
        .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = entry->iova,
            .translated_addr = entry->translated_addr,
            .perm = entry->perm,
            .addr_mask = ~TARGET_PAGE_MASK,
        },
    };

    if (event.type == IOMMU_NOTIFIER_UNMAP) {
        if (!cache) {
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
        /* Don't notify the iommu yet, maybe we can bundle contiguous unmaps */
        goto out;
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                goto out;
            }

            event.type = IOMMU_NOTIFIER_UNMAP;
            event.entry.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
            event.type = IOMMU_NOTIFIER_MAP;
            event.entry.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = TARGET_PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    /*
     * All associated iotlb entries have already been cleared, trigger the
     * unmaps.
     */
    memory_region_notify_iommu(&iommu->iommu_mr, 0, event);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}

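/*
 * Notify the IOMMU of an unmap covering [iova, iova + len), split into the
 * largest naturally aligned power-of-two chunks the range allows.
 */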
static void s390_pci_batch_unmap(S390PCIIOMMU *iommu, uint64_t iova,
                                 uint64_t len)
{
    uint64_t remain = len, start = iova, end = start + len - 1, mask, size;
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .translated_addr = 0,
            .perm = IOMMU_NONE,
        },
    };

    while (remain >= TARGET_PAGE_SIZE) {
        mask = dma_aligned_pow2_mask(start, end, 64);
        size = mask + 1;
        event.entry.iova = start;
        event.entry.addr_mask = mask;
        memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
        start += size;
        remain -= size;
    }
}

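/*
 * Refresh PCI Translations (RPCIT) instruction handler: walk the guest I/O
 * translation tables for the requested range and replay the resulting maps
 * and unmaps into the IOMMU, coalescing contiguous page unmaps.
 */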
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t iova, coalesce = 0;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end, sstart;
    uint32_t dma_avail;
    bool again;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    sstart = start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

 retry:
    start = sstart;
    again = false;
    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        /*
         * If this is an unmap of a PTE, let's try to coalesce multiple unmaps
         * into as few notifier events as possible.
         */
        if (entry.perm == IOMMU_NONE && entry.len == TARGET_PAGE_SIZE) {
            if (coalesce == 0) {
                iova = entry.iova;
            }
            coalesce += entry.len;
        } else if (coalesce > 0) {
            /* Unleash the coalesced unmap before processing a new map */
            s390_pci_batch_unmap(iommu, iova, coalesce);
            coalesce = 0;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            if (dma_avail > 0 || entry.perm == IOMMU_NONE) {
                dma_avail = s390_pci_update_iotlb(iommu, &entry);
                entry.iova += TARGET_PAGE_SIZE;
                entry.translated_addr += TARGET_PAGE_SIZE;
            } else {
                /*
                 * We are unable to make a new mapping at this time, continue
                 * on and hopefully free up more space.  Then attempt another
                 * pass.
                 */
                again = true;
                break;
            }
        }
    }
    if (coalesce) {
        /* Unleash the coalesced unmap before finishing rpcit */
        s390_pci_batch_unmap(iommu, iova, coalesce);
        coalesce = 0;
    }
    if (again && dma_avail > 0) {
        goto retry;
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* vfio DMA mappings are exhausted, trigger a RPCIT */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}

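/*
 * PCI Store Block (PCISTB) instruction handler: copy up to maxstbl bytes
 * from guest memory at @gaddr to the BAR designated by r1, in 8-byte
 * chunks.
 */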
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0x1fff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    for (i = 0; i < len; i += 8) {
        if (!memory_region_access_valid(mr, offset + i, 8, true,
                                        MEMTXATTRS_UNSPECIFIED)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}

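/*
 * Register adapter interrupts for @pbdev from the FIB: map the summary and
 * interrupt indicators and record the adapter routing information.
 */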
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

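/*
 * Register the guest I/O address translation parameters (PBA, PAL, IOTA)
 * from the FIB with the device's IOMMU and enable it; only a region-third
 * table designation with translation is supported.
 */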
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
                    uintptr_t ra)
{
    S390PCIIOMMU *iommu = pbdev->iommu;
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    pba &= ~0xfff;
    pal |= 0xfff;
    if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    s390_pci_iommu_enable(iommu);

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}

void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer) {
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    pbdev->fmb_addr = 0;
    memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
}

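/*
 * Store @len bytes of @val at @offset within the guest's function
 * measurement block; on failure report an FMB error event and stop
 * FMB updates.
 */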
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult ret;
    uint64_t dst = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &ret);
        break;
    default:
        ret = MEMTX_ERROR;
        break;
    }
    if (ret != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return ret;
}

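/*
 * Timer callback: write the current FMB contents (update indicator, sample
 * count and counters) out to the guest and re-arm the timer.
 */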
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui);
}

static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist);
    if (rc) {
        DPRINTF("Failed to enable interrupt forwarding\n");
        return rc;
    }

    return 0;
}

static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_disable(pbdev);
    if (rc) {
        DPRINTF("Failed to disable interrupt forwarding\n");
        return rc;
    }

    return 0;
}

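/*
 * Modify PCI Function Controls (MPCIFC) instruction handler: read the FIB
 * from guest memory and perform the requested operation (register or
 * deregister interrupts or I/O address translation, reset error or blocked
 * state, or set up function measurement).
 */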
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->interp) {
            if (mpcifc_reg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (pbdev->interp) {
            if (mpcifc_dereg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                                    pbdev->pci_group->zpci_group.mui);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

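/*
 * Store PCI Function Controls (STPCIFC) instruction handler: build a FIB
 * describing the current state of the function addressed by the handle in
 * r1 and write it to guest memory at @fiba.
 */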
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->iommu->pba);
    stq_p(&fib.pal, pbdev->iommu->pal);
    stq_p(&fib.iota, pbdev->iommu->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}