qemu/hw/xen/xen_pt_config_init.c
/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/xen/xen_backend.h"
#include "xen_pt.h"

#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
    (((value) & (val_mask)) | ((data) & ~(val_mask)))
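/* XEN_PT_MERGE_VALUE takes the bits selected by val_mask from 'value' and the
 * remaining bits from 'data'.  For example (values chosen for illustration):
 * XEN_PT_MERGE_VALUE(0xAB, 0xCD, 0xF0) = (0xAB & 0xF0) | (0xCD & 0x0F) = 0xAD.
 */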

#define XEN_PT_INVALID_REG          0xFFFFFFFF      /* invalid register value */

/* prototype */

static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data);


/* helper */

/* A return value of 1 means the capability should NOT be exposed to guest. */
static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
{
    switch (grp_id) {
    case PCI_CAP_ID_EXP:
        /* The PCI Express Capability Structure of the VF of the Intel 82599
         * 10GbE Controller is trivial: per 'Table 9.7. VF PCIe Configuration
         * Space' of the datasheet, the PCI Express Capabilities Register is 0,
         * so the Capability Version is 0 and xen_pt_pcie_size_init() would
         * fail.  Do not try to expose it to the guest.
         *
         * The datasheet is available at
         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
         */
        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
            return 1;
        }
        break;
    }
    return 0;
}

/* find emulated register group entry */
XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
{
    XenPTRegGroup *entry = NULL;

    /* find register group entry */
    QLIST_FOREACH(entry, &s->reg_grps, entries) {
        /* check address */
        if ((entry->base_offset <= address)
            && ((entry->base_offset + entry->size) > address)) {
            return entry;
        }
    }

    /* group entry not found */
    return NULL;
}

/* find emulated register entry */
XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
{
    XenPTReg *reg_entry = NULL;
    XenPTRegInfo *reg = NULL;
    uint32_t real_offset = 0;

    /* find register entry */
    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
        reg = reg_entry->reg;
        real_offset = reg_grp->base_offset + reg->offset;
        /* check address */
        if ((real_offset <= address)
            && ((real_offset + reg->size) > address)) {
            return reg_entry;
        }
    }

    return NULL;
}

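/*
 * Bits that may be forwarded to the real device: everything that is neither
 * emulated nor read-only, and, unless the device is in permissive mode, not
 * reserved either.  The result is further restricted to the caller's
 * valid_mask (the bytes actually covered by the current config access).
 */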
static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t valid_mask)
{
    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);

    if (!s->permissive) {
        throughable_mask &= ~reg->res_mask;
    }

    return throughable_mask & valid_mask;
}

/****************
 * general register functions
 */

/* register initialization function */

static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}

/* Read register functions */
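/*
 * Read pattern: the caller passes in the value read from the real device via
 * *value; bits covered by emu_mask are replaced with the emulated copy kept
 * in cfg_entry, while all other bits are returned as the hardware provided
 * them.
 */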

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;
    uint8_t *data = cfg_entry->ptr.byte;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t valid_emu_mask = 0;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* emulate word register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}
static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    /* emulate long register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);

    return 0;
}

/* Write register functions */
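/*
 * Write pattern: the emulated copy is first updated with the bits the guest
 * is allowed to change (emulated and not read-only), then *val is rewritten
 * so that only "throughable" bits carry the guest's value while every other
 * bit keeps the current device value.  RW1C bits are masked out of dev_value
 * so that the read-modify-write cycle does not accidentally clear them.
 */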

static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint8_t *val, uint8_t dev_value,
                                 uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t writable_mask = 0;
    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint8_t *data = cfg_entry->ptr.byte;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint16_t *val, uint16_t dev_value,
                                 uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}
static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                 uint32_t *val, uint32_t dev_value,
                                 uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
                              throughable_mask);

    return 0;
}

/* XenPTRegInfo declaration
 * - only for registers that are at least partly emulated.
 * - for pass-through registers that need special behavior (such as interacting
 *   with other components), set emu_mask to 0 and supply suitable r/w handlers.
 * - do NOT use all Fs for init_val, otherwise the register is treated as
 *   invalid and will not be added to the emulation table.
 */

/********************
 * Header Type0
 */

static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    uint32_t reg_field = 0;

    /* find Header register group */
    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
    if (reg_grp_entry) {
        /* find Capabilities Pointer register */
        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
        if (reg_entry) {
            /* check Capabilities Pointer register */
            if (*reg_entry->ptr.half_word) {
                reg_field |= PCI_STATUS_CAP_LIST;
            } else {
                reg_field &= ~PCI_STATUS_CAP_LIST;
            }
        } else {
            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
                                     " for Capabilities Pointer register."
                                     " (%s)\n", __func__);
            return -1;
        }
    } else {
        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
                                 " for Header. (%s)\n", __func__);
        return -1;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* report the device as multi-function (bit 7) so the guest probes every
     * function of the slot
     */
    *data = reg->init_val | 0x80;
    return 0;
}

/* initialize Interrupt Pin register */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}

/* Command register */
static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint16_t *val, uint16_t dev_value,
                                uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
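    /*
     * INTX_DISABLE is only taken from the guest when the guest itself asks
     * to disable INTx, or when an INTx machine IRQ is actually bound
     * (s->machine_irq != 0); otherwise the device's current setting is kept.
     */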
    if (*val & PCI_COMMAND_INTX_DISABLE) {
        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
    } else {
        if (s->machine_irq) {
            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
        }
    }

    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

/* BAR */
#define XEN_PT_BAR_MEM_RO_MASK    0x0000000F  /* BAR ReadOnly mask(Memory) */
#define XEN_PT_BAR_MEM_EMU_MASK   0xFFFFFFF0  /* BAR emul mask(Memory) */
#define XEN_PT_BAR_IO_RO_MASK     0x00000003  /* BAR ReadOnly mask(I/O) */
#define XEN_PT_BAR_IO_EMU_MASK    0xFFFFFFFC  /* BAR emul mask(I/O) */
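/*
 * For memory BARs the low 4 flag bits (space indicator, type, prefetchable)
 * are read-only and come from the device, while the address bits above them
 * are emulated; for I/O BARs only the low 2 flag bits are read-only.
 */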

static bool is_64bit_bar(PCIIORegion *r)
{
    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
}

static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
{
    if (is_64bit_bar(r)) {
        uint64_t size64;
        size64 = (r + 1)->size;
        size64 <<= 32;
        size64 += r->size;
        return size64;
    }
    return r->size;
}

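/*
 * Classify a BAR: the upper half of a 64-bit memory BAR is flagged as
 * XEN_PT_BAR_FLAG_UPPER, zero-sized BARs as unused, the expansion ROM BAR is
 * always treated as memory, and the rest are I/O or memory according to the
 * host region type.
 */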
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}

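/*
 * Rebuild the raw BAR value from the host's view: the base address provided
 * by the kernel plus the flag bits that are not part of the address itself.
 */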
static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
{
    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
    } else {
        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
    }
}

static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
                               uint32_t real_offset, uint32_t *data)
{
    uint32_t reg_field = 0;
    int index;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* set BAR flag */
    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
        reg_field = XEN_PT_INVALID_REG;
    }

    *data = reg_field;
    return 0;
}
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set the emulate mask depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

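    /*
     * BARs are naturally aligned, so all address bits below the region size
     * are treated as read-only; e.g. for a 64 KiB memory BAR the read-only
     * mask becomes 0x0000FFFF and the guest can only program bits 31:16.
     */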
    /* set the emulate mask and read-only mask values depending on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}

/* write Exp ROM BAR */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}

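/*
 * Intel IGD passthrough: accesses to the graphics OpRegion address register
 * (ASLS, typically at config offset 0xFC) are forwarded to the IGD OpRegion
 * handling code instead of directly to the physical register.
 */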
static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}

static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}

/* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* uses the emulated Cap Ptr value to initialize,
     * so it must be declared after the Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* the BAR mask is decided later, depending on the I/O or memory type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Vital Product Data Capability
 */

/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * Vendor Specific Capability
 */

/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    {
        .size = 0,
    },
};


/*****************************
 * PCI Express Capability
 */

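/*
 * The PCI Express Capabilities register (PCI_EXP_FLAGS, offset 2 in the
 * capability) carries the capability version in bits 3:0 and the device/port
 * type in bits 7:4, so reading a single byte is sufficient for both helpers
 * below.
 */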
static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
                                             uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return flag & PCI_EXP_FLAGS_VERS;
}

static inline uint8_t get_device_type(XenPCIPassthroughState *s,
                                      uint32_t offset)
{
    uint8_t flag;
    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
        return 0;
    }
    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
}

/* initialize Link Control register */
static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);

    /* no need to initialize in case of Root Complex Integrated Endpoint
     * with cap_ver 1.x
     */
    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Device Control 2 register */
static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        *data = XEN_PT_INVALID_REG;
        return 0;
    }

    *data = reg->init_val;
    return 0;
}
/* initialize Link Control 2 register */
static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
    uint32_t reg_field = 0;

    /* no need to initialize in case of cap_ver 1.x */
    if (cap_ver == 1) {
        reg_field = XEN_PT_INVALID_REG;
    } else {
        /* set Supported Link Speed */
        uint8_t lnkcap;
        int rc;
        rc = xen_host_pci_get_byte(&s->real_device,
                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
                                   &lnkcap);
        if (rc) {
            return rc;
        }
        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
    }

    *data = reg_field;
    return 0;
}

/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset     = PCI_EXP_DEVSTA,
        .size       = 2,
        .res_mask   = 0xFFC0,
        .ro_mask    = 0x0030,
        .rw1c_mask  = 0x000F,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset     = PCI_EXP_LNKSTA,
        .size       = 2,
        .ro_mask    = 0x3FFF,
        .rw1c_mask  = 0xC000,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFE0,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/*********************************
 * Power Management Capability
 */

/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .res_mask   = 0x00F0,
        .ro_mask    = 0x610C,
        .rw1c_mask  = 0x8000,
        .emu_mask   = 0x810B,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    {
        .size = 0,
    },
};


/********************************
 * MSI Capability
 */

/* Helper */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
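/*
 * The layout of an MSI capability depends on whether the function uses
 * 32-bit or 64-bit message addresses: e.g. the Message Data register sits at
 * PCI_MSI_DATA_32 (0x8) in the 32-bit layout but at PCI_MSI_DATA_64 (0xC)
 * when PCI_MSI_FLAGS_64BIT is set.  The macro above checks that a register's
 * offset matches the layout actually used by the device.
 */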

/* Message Control register */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * The guest's MSI simply will not work.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        xen_pt_msi_disable(s);
    }

    return 0;
}

/* initialize Message Upper Address register */
static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
                                     XenPTRegInfo *reg, uint32_t real_offset,
                                     uint32_t *data)
{
    /* no need to initialize in case of 32 bit type */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        *data = XEN_PT_INVALID_REG;
    } else {
        *data = reg->init_val;
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Message Data register */
static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;
    uint32_t offset = reg->offset;

    /* check whether the offset matches the 32/64-bit layout */
    if (xen_pt_msi_check_type(offset, flags, DATA)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Mask register */
static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
                                XenPTRegInfo *reg, uint32_t real_offset,
                                uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit layout */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* this function will be called twice (for 32 bit and 64 bit type) */
/* initialize Pending register */
static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    uint32_t flags = s->msi->flags;

    /* check whether the offset matches the 32/64-bit layout */
    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
        *data = XEN_PT_INVALID_REG;
    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
        *data = reg->init_val;
    } else {
        *data = XEN_PT_INVALID_REG;
    }
    return 0;
}

/* write Message Address register */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}


/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check whether the offset matches the 32/64-bit layout */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}

/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xFE00,
        .ro_mask    = 0x018E,
        .emu_mask   = 0x017E,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    {
        .size = 0,
    },
};


/**************************************
 * MSI-X Capability
 */

/* Message Control register for MSI-X */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
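    /*
     * Bind the MSI-X vectors only when the guest enables MSI-X with the
     * function mask clear; tear everything down again when the guest
     * disables MSI-X while it is currently enabled.
     */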
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}

/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x3800,
        .ro_mask    = 0x07FF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    {
        .size = 0,
    },
};

static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset     = 0x0,
        .size       = 4,
        .init_val   = 0,
        .u.dw.read   = xen_pt_intel_opregion_read,
        .u.dw.write  = xen_pt_intel_opregion_write,
    },
    {
        .size = 0,
    },
};

/****************************
 * Capabilities
 */

/* capability structure register group size functions */

static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
/* get PCI Express Capability Structure register group size */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate the size depending on capability version and device/port type */
1578    /* in case of PCI Express Base Specification Rev 1.x */
1579    if (version == 1) {
1580        /* The PCI Express Capabilities, Device Capabilities, and Device
1581         * Status/Control registers are required for all PCI Express devices.
1582         * The Link Capabilities and Link Status/Control are required for all
1583         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
1584         * are not required to implement registers other than those listed
1585         * above and terminate the capability structure.
1586         */
1587        switch (type) {
1588        case PCI_EXP_TYPE_ENDPOINT:
1589        case PCI_EXP_TYPE_LEG_END:
1590            pcie_size = 0x14;
1591            break;
1592        case PCI_EXP_TYPE_RC_END:
1593            /* has no link */
1594            pcie_size = 0x0C;
1595            break;
1596            /* only EndPoint passthrough is supported */
1597        case PCI_EXP_TYPE_ROOT_PORT:
1598        case PCI_EXP_TYPE_UPSTREAM:
1599        case PCI_EXP_TYPE_DOWNSTREAM:
1600        case PCI_EXP_TYPE_PCI_BRIDGE:
1601        case PCI_EXP_TYPE_PCIE_BRIDGE:
1602        case PCI_EXP_TYPE_RC_EC:
1603        default:
1604            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
1605            return -1;
1606        }
1607    }
1608    /* in case of PCI Express Base Specification Rev 2.0 */
1609    else if (version == 2) {
1610        switch (type) {
1611        case PCI_EXP_TYPE_ENDPOINT:
1612        case PCI_EXP_TYPE_LEG_END:
1613        case PCI_EXP_TYPE_RC_END:
1614            /* For Functions that do not implement the registers,
1615             * these spaces must be hardwired to 0b.
1616             */
1617            pcie_size = 0x3C;
1618            break;
1619            /* only EndPoint passthrough is supported */
1620        case PCI_EXP_TYPE_ROOT_PORT:
1621        case PCI_EXP_TYPE_UPSTREAM:
1622        case PCI_EXP_TYPE_DOWNSTREAM:
1623        case PCI_EXP_TYPE_PCI_BRIDGE:
1624        case PCI_EXP_TYPE_PCIE_BRIDGE:
1625        case PCI_EXP_TYPE_RC_EC:
1626        default:
1627            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
1628            return -1;
1629        }
1630    } else {
1631        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
1632        return -1;
1633    }
1634
1635    *size = pcie_size;
1636    return 0;
1637}
1638/* get MSI Capability Structure register group size */
1639static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
1640                                const XenPTRegGroupInfo *grp_reg,
1641                                uint32_t base_offset, uint8_t *size)
1642{
1643    uint16_t msg_ctrl = 0;
1644    uint8_t msi_size = 0xa;
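        /* 0xa bytes: capability ID/next pointer (2), Message Control (2),
         * Message Address (4) and Message Data (2) */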
1645    int rc;
1646
1647    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
1648                               &msg_ctrl);
1649    if (rc) {
1650        return rc;
1651    }
1652    /* check for 64-bit addressing and per-vector masking support */
1653    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
1654        msi_size += 4;
1655    }
1656    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
1657        msi_size += 10;
1658    }
1659
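        /* allocate the MSI bookkeeping structure now; no pirq is bound yet */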
1660    s->msi = g_new0(XenPTMSI, 1);
1661    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
1662
1663    *size = msi_size;
1664    return 0;
1665}
1666/* get MSI-X Capability Structure register group size */
1667static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
1668                                 const XenPTRegGroupInfo *grp_reg,
1669                                 uint32_t base_offset, uint8_t *size)
1670{
1671    int rc = 0;
1672
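        /* read the host MSI-X capability and set up the MSI-X state (s->msix) */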
1673    rc = xen_pt_msix_init(s, base_offset);
1674
1675    if (rc < 0) {
1676        XEN_PT_ERR(&s->dev, "Internal error: xen_pt_msix_init failed.\n");
1677        return rc;
1678    }
1679
1680    *size = grp_reg->grp_size;
1681    return 0;
1682}
1683
1684
1685static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
1686    /* Header Type0 reg group */
1687    {
1688        .grp_id      = 0xFF,
1689        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1690        .grp_size    = 0x40,
1691        .size_init   = xen_pt_reg_grp_size_init,
1692        .emu_regs = xen_pt_emu_reg_header0,
1693    },
1694    /* PCI PowerManagement Capability reg group */
1695    {
1696        .grp_id      = PCI_CAP_ID_PM,
1697        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1698        .grp_size    = PCI_PM_SIZEOF,
1699        .size_init   = xen_pt_reg_grp_size_init,
1700        .emu_regs = xen_pt_emu_reg_pm,
1701    },
1702    /* AGP Capability Structure reg group */
1703    {
1704        .grp_id     = PCI_CAP_ID_AGP,
1705        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1706        .grp_size   = 0x30,
1707        .size_init  = xen_pt_reg_grp_size_init,
1708    },
1709    /* Vital Product Data Capability Structure reg group */
1710    {
1711        .grp_id      = PCI_CAP_ID_VPD,
1712        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1713        .grp_size    = 0x08,
1714        .size_init   = xen_pt_reg_grp_size_init,
1715        .emu_regs = xen_pt_emu_reg_vpd,
1716    },
1717    /* Slot Identification reg group */
1718    {
1719        .grp_id     = PCI_CAP_ID_SLOTID,
1720        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1721        .grp_size   = 0x04,
1722        .size_init  = xen_pt_reg_grp_size_init,
1723    },
1724    /* MSI Capability Structure reg group */
1725    {
1726        .grp_id      = PCI_CAP_ID_MSI,
1727        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1728        .grp_size    = 0xFF,
1729        .size_init   = xen_pt_msi_size_init,
1730        .emu_regs = xen_pt_emu_reg_msi,
1731    },
1732    /* PCI-X Capabilities List Item reg group */
1733    {
1734        .grp_id     = PCI_CAP_ID_PCIX,
1735        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1736        .grp_size   = 0x18,
1737        .size_init  = xen_pt_reg_grp_size_init,
1738    },
1739    /* Vendor Specific Capability Structure reg group */
1740    {
1741        .grp_id      = PCI_CAP_ID_VNDR,
1742        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1743        .grp_size    = 0xFF,
1744        .size_init   = xen_pt_vendor_size_init,
1745        .emu_regs = xen_pt_emu_reg_vendor,
1746    },
1747    /* SHPC Capability List Item reg group */
1748    {
1749        .grp_id     = PCI_CAP_ID_SHPC,
1750        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1751        .grp_size   = 0x08,
1752        .size_init  = xen_pt_reg_grp_size_init,
1753    },
1754    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
1755    {
1756        .grp_id     = PCI_CAP_ID_SSVID,
1757        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1758        .grp_size   = 0x08,
1759        .size_init  = xen_pt_reg_grp_size_init,
1760    },
1761    /* AGP 8x Capability Structure reg group */
1762    {
1763        .grp_id     = PCI_CAP_ID_AGP3,
1764        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1765        .grp_size   = 0x30,
1766        .size_init  = xen_pt_reg_grp_size_init,
1767    },
1768    /* PCI Express Capability Structure reg group */
1769    {
1770        .grp_id      = PCI_CAP_ID_EXP,
1771        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1772        .grp_size    = 0xFF,
1773        .size_init   = xen_pt_pcie_size_init,
1774        .emu_regs = xen_pt_emu_reg_pcie,
1775    },
1776    /* MSI-X Capability Structure reg group */
1777    {
1778        .grp_id      = PCI_CAP_ID_MSIX,
1779        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1780        .grp_size    = 0x0C,
1781        .size_init   = xen_pt_msix_size_init,
1782        .emu_regs = xen_pt_emu_reg_msix,
1783    },
1784    /* Intel IGD Opregion group */
1785    {
1786        .grp_id      = XEN_PCI_INTEL_OPREGION,
1787        .grp_type    = XEN_PT_GRP_TYPE_EMU,
1788        .grp_size    = 0x4,
1789        .size_init   = xen_pt_reg_grp_size_init,
1790        .emu_regs    = xen_pt_emu_reg_igd_opregion,
1791    },
1792    {
1793        .grp_size = 0,
1794    },
1795};
1796
1797/* initialize Capabilities Pointer or Next Pointer register */
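    /* The emulated pointer skips capabilities that are hidden or hardwired
     * to 0, so the guest's capability chain only links emulated groups. */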
1798static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
1799                               XenPTRegInfo *reg, uint32_t real_offset,
1800                               uint32_t *data)
1801{
1802    int i, rc;
1803    uint8_t reg_field;
1804    uint8_t cap_id = 0;
1805
1806    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
1807    if (rc) {
1808        return rc;
1809    }
1810    /* find capability offset */
1811    while (reg_field) {
1812        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
1813            if (xen_pt_hide_dev_cap(&s->real_device,
1814                                    xen_pt_emu_reg_grps[i].grp_id)) {
1815                continue;
1816            }
1817
1818            rc = xen_host_pci_get_byte(&s->real_device,
1819                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
1820            if (rc) {
1821                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
1822                           reg_field + PCI_CAP_LIST_ID, rc);
1823                return rc;
1824            }
1825            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
1826                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
1827                    goto out;
1828                }
1829                /* the capability is hardwired to 0 (hidden), so skip it and check the next one */
1830                break;
1831            }
1832        }
1833
1834        /* next capability */
1835        rc = xen_host_pci_get_byte(&s->real_device,
1836                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
1837        if (rc) {
1838            return rc;
1839        }
1840    }
1841
1842out:
1843    *data = reg_field;
1844    return 0;
1845}
1846
1847
1848/*************
1849 * Main
1850 */
1851
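    /* scan the host device's standard capability list and return the config
     * space offset of capability 'cap', or 0 if it is not present */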
1852static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
1853{
1854    uint8_t id;
1855    unsigned max_cap = XEN_PCI_CAP_MAX;
1856    uint8_t pos = PCI_CAPABILITY_LIST;
1857    uint8_t status = 0;
1858
1859    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
1860        return 0;
1861    }
1862    if ((status & PCI_STATUS_CAP_LIST) == 0) {
1863        return 0;
1864    }
1865
1866    while (max_cap--) {
1867        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
1868            break;
1869        }
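            /* capability structures must live past the 0x40-byte standard header */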
1870        if (pos < PCI_CONFIG_HEADER_SIZE) {
1871            break;
1872        }
1873
1874        pos &= ~3;
1875        if (xen_host_pci_get_byte(&s->real_device,
1876                                  pos + PCI_CAP_LIST_ID, &id)) {
1877            break;
1878        }
1879
1880        if (id == 0xff) {
1881            break;
1882        }
1883        if (id == cap) {
1884            return pos;
1885        }
1886
1887        pos += PCI_CAP_LIST_NEXT;
1888    }
1889    return 0;
1890}
1891
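    /* set up one emulated register: run its init hook, reconcile the initial
     * value with the host value in dev.config, and add it to the group list */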
1892static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
1893                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
1894                                   Error **errp)
1895{
1896    XenPTReg *reg_entry;
1897    uint32_t data = 0;
1898    int rc = 0;
1899
1900    reg_entry = g_new0(XenPTReg, 1);
1901    reg_entry->reg = reg;
1902
1903    if (reg->init) {
1904        uint32_t host_mask, size_mask;
1905        unsigned int offset;
1906        uint32_t val;
1907
1908        /* initialize emulate register */
1909        rc = reg->init(s, reg_entry->reg,
1910                       reg_grp->base_offset + reg->offset, &data);
1911        if (rc < 0) {
1912            g_free(reg_entry);
1913            error_setg(errp, "Failed to initialize emulated register");
1914            return;
1915        }
1916        if (data == XEN_PT_INVALID_REG) {
1917            /* free unused BAR register entry */
1918            g_free(reg_entry);
1919            return;
1920        }
1921        /* Sync up the data to dev.config */
1922        offset = reg_grp->base_offset + reg->offset;
1923        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);
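            /* size_mask covers reg->size bytes: 0xFF, 0xFFFF or 0xFFFFFFFF */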
1924
1925        switch (reg->size) {
1926        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
1927                break;
1928        case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
1929                break;
1930        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
1931                break;
1932        default: abort();
1933        }
1934        if (rc) {
1935            /* Serious issues when we cannot read the host values! */
1936            g_free(reg_entry);
1937            error_setg(errp, "Cannot read host values");
1938            return;
1939        }
1940        /* Bits set in emu_mask are the ones we emulate. The dev.config shall
1941         * contain the emulated view of the guest - therefore we flip the mask
1942         * to mask out the host values (which dev.config initially has). */
1943        host_mask = size_mask & ~reg->emu_mask;
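            /* e.g. a 2-byte register with emu_mask 0x0C00 gives host_mask 0xF3FF */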
1944
1945        if ((data & host_mask) != (val & host_mask)) {
1946            uint32_t new_val;
1947
1948            /* Mask out host (including past size). */
1949            new_val = val & host_mask;
1950            /* Merge emulated ones (excluding the non-emulated ones). */
1951            new_val |= data & host_mask;
1952            /* Leave the host and emulated values past the size intact - we do not
1953             * care, since we write at reg->size granularity, but keep them so the
1954             * logging below shows the proper value. */
1955            new_val |= (val | data) & ~size_mask;
1956            XEN_PT_LOG(&s->dev, "Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
1957                       offset, data, val, new_val);
1958            val = new_val;
1959        } else {
1960            val = data;
            }
1961
1962        if (val & ~size_mask) {
1963            error_setg(errp, "Offset 0x%04x:0x%04x expands past"
1964                    " register size (%d)", offset, val, reg->size);
1965            g_free(reg_entry);
1966            return;
1967        }
1968        /* This could be just pci_set_long as we don't modify the bits
1969         * past reg->size, but in case this routine is run in parallel or the
1970         * init value is larger, we do not want to over-write registers. */
1971        switch (reg->size) {
1972        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
1973                break;
1974        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
1975                break;
1976        case 4: pci_set_long(s->dev.config + offset, val);
1977                break;
1978        default: abort();
1979        }
1980        /* set register value pointer to the data. */
1981        reg_entry->ptr.byte = s->dev.config + offset;
1982
1983    }
1984    /* list add register entry */
1985    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
1986}
1987
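    /* build the list of emulated register groups for the passed-through device */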
1988void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
1989{
1990    int i, rc;
1991    Error *err = NULL;
1992
1993    QLIST_INIT(&s->reg_grps);
1994
1995    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
1996        uint32_t reg_grp_offset = 0;
1997        XenPTRegGroup *reg_grp_entry = NULL;
1998
1999        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
2000            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
2001            if (xen_pt_hide_dev_cap(&s->real_device,
2002                                    xen_pt_emu_reg_grps[i].grp_id)) {
2003                continue;
2004            }
2005
2006            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
2007
2008            if (!reg_grp_offset) {
2009                continue;
2010            }
2011        }
2012
2013        /*
2014         * By default we will trap up to 0x40 in the cfg space.
2015         * If an Intel device is passed through we need to trap 0xfc,
2016         * therefore the size should be 0xff.
2017         */
2018        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
2019            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
2020        }
2021
2022        reg_grp_entry = g_new0(XenPTRegGroup, 1);
2023        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
2024        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
2025
2026        reg_grp_entry->base_offset = reg_grp_offset;
2027        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
2028        if (xen_pt_emu_reg_grps[i].size_init) {
2029            /* get register group size */
2030            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
2031                                                  reg_grp_offset,
2032                                                  &reg_grp_entry->size);
2033            if (rc < 0) {
2034                error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x,"
2035                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
2036                           xen_pt_emu_reg_grps[i].grp_type, rc);
2037                error_propagate(errp, err);
2038                xen_pt_config_delete(s);
2039                return;
2040            }
2041        }
2042
2043        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
2044            if (xen_pt_emu_reg_grps[i].emu_regs) {
2045                int j = 0;
2046                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
2047
2048                /* initialize capability register */
2049                for (j = 0; regs->size != 0; j++, regs++) {
2050                    xen_pt_config_reg_init(s, reg_grp_entry, regs, &err);
2051                    if (err) {
2052                        error_append_hint(&err, "Failed to initialize %d/%zu"
2053                                " reg 0x%x in grp_type = 0x%x (%d/%zu)",
2054                                j, ARRAY_SIZE(xen_pt_emu_reg_grps[i].emu_regs),
2055                                regs->offset, xen_pt_emu_reg_grps[i].grp_type,
2056                                i, ARRAY_SIZE(xen_pt_emu_reg_grps));
2057                        error_propagate(errp, err);
2058                        xen_pt_config_delete(s);
2059                        return;
2060                    }
2061                }
2062            }
2063        }
2064    }
2065}
2066
2067/* delete all emulated registers */
2068void xen_pt_config_delete(XenPCIPassthroughState *s)
2069{
2070    struct XenPTRegGroup *reg_group, *next_grp;
2071    struct XenPTReg *reg, *next_reg;
2072
2073    /* free MSI/MSI-X info table */
2074    if (s->msix) {
2075        xen_pt_msix_unmap(s);
2076    }
2077    g_free(s->msi);
2078
2079    /* free all register group entries */
2080    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
2081        /* free all register entries */
2082        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
2083            QLIST_REMOVE(reg, entries);
2084            g_free(reg);
2085        }
2086
2087        QLIST_REMOVE(reg_group, entries);
2088        g_free(reg_group);
2089    }
2090}
2091