/* qemu/hw/xen/xen_pt_config_init.c */
   1/*
   2 * Copyright (c) 2007, Neocleus Corporation.
   3 * Copyright (c) 2007, Intel Corporation.
   4 *
   5 * This work is licensed under the terms of the GNU GPL, version 2.  See
   6 * the COPYING file in the top-level directory.
   7 *
   8 * Alex Novik <alex@neocleus.com>
   9 * Allen Kay <allen.m.kay@intel.com>
  10 * Guy Zana <guy@neocleus.com>
  11 *
  12 * This file implements direct PCI assignment to a HVM guest
  13 */
  14
  15#include "qemu/osdep.h"
  16#include "qapi/error.h"
  17#include "qemu/timer.h"
  18#include "hw/xen/xen_backend.h"
  19#include "xen_pt.h"
  20
  21#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
  22    (((value) & (val_mask)) | ((data) & ~(val_mask)))
  23
  24#define XEN_PT_INVALID_REG          0xFFFFFFFF      /* invalid register value */
  25
  26/* prototype */
  27
  28static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  29                               uint32_t real_offset, uint32_t *data);
  30
  31
  32/* helper */
  33
  34/* A return value of 1 means the capability should NOT be exposed to guest. */
  35static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
  36{
  37    switch (grp_id) {
  38    case PCI_CAP_ID_EXP:
  39        /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
  40         * Controller looks trivial, e.g., the PCI Express Capabilities
  41         * Register is 0. We should not try to expose it to guest.
  42         *
  43         * The datasheet is available at
  44         * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
  45         *
  46         * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
  47         * PCI Express Capability Structure of the VF of Intel 82599 10GbE
  48         * Controller looks trivial, e.g., the PCI Express Capabilities
  49         * Register is 0, so the Capability Version is 0 and
  50         * xen_pt_pcie_size_init() would fail.
  51         */
  52        if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
  53            d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
  54            return 1;
  55        }
  56        break;
  57    }
  58    return 0;
  59}
  60
  61/*   find emulate register group entry */
  62XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
  63{
  64    XenPTRegGroup *entry = NULL;
  65
  66    /* find register group entry */
  67    QLIST_FOREACH(entry, &s->reg_grps, entries) {
  68        /* check address */
  69        if ((entry->base_offset <= address)
  70            && ((entry->base_offset + entry->size) > address)) {
  71            return entry;
  72        }
  73    }
  74
  75    /* group entry not found */
  76    return NULL;
  77}
  78
  79/* find emulate register entry */
  80XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
  81{
  82    XenPTReg *reg_entry = NULL;
  83    XenPTRegInfo *reg = NULL;
  84    uint32_t real_offset = 0;
  85
  86    /* find register entry */
  87    QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
  88        reg = reg_entry->reg;
  89        real_offset = reg_grp->base_offset + reg->offset;
  90        /* check address */
  91        if ((real_offset <= address)
  92            && ((real_offset + reg->size) > address)) {
  93            return reg_entry;
  94        }
  95    }
  96
  97    return NULL;
  98}
  99
 100static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
 101                                     XenPTRegInfo *reg, uint32_t valid_mask)
 102{
 103    uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
 104
 105    if (!s->permissive) {
 106        throughable_mask &= ~reg->res_mask;
 107    }
 108
 109    return throughable_mask & valid_mask;
 110}
 111
 112/****************
 113 * general register functions
 114 */
 115
 116/* register initialization function */
 117
/* Default register init: seed the emulated value from the table's init_val. */
static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = reg->init_val;
    return 0;
}
 125
 126/* Read register functions */
 127
 128static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 129                                uint8_t *value, uint8_t valid_mask)
 130{
 131    XenPTRegInfo *reg = cfg_entry->reg;
 132    uint8_t valid_emu_mask = 0;
 133    uint8_t *data = cfg_entry->ptr.byte;
 134
 135    /* emulate byte register */
 136    valid_emu_mask = reg->emu_mask & valid_mask;
 137    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
 138
 139    return 0;
 140}
 141static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 142                                uint16_t *value, uint16_t valid_mask)
 143{
 144    XenPTRegInfo *reg = cfg_entry->reg;
 145    uint16_t valid_emu_mask = 0;
 146    uint16_t *data = cfg_entry->ptr.half_word;
 147
 148    /* emulate word register */
 149    valid_emu_mask = reg->emu_mask & valid_mask;
 150    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
 151
 152    return 0;
 153}
 154static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 155                                uint32_t *value, uint32_t valid_mask)
 156{
 157    XenPTRegInfo *reg = cfg_entry->reg;
 158    uint32_t valid_emu_mask = 0;
 159    uint32_t *data = cfg_entry->ptr.word;
 160
 161    /* emulate long register */
 162    valid_emu_mask = reg->emu_mask & valid_mask;
 163    *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
 164
 165    return 0;
 166}
 167
 168/* Write register functions */
 169
 170static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 171                                 uint8_t *val, uint8_t dev_value,
 172                                 uint8_t valid_mask)
 173{
 174    XenPTRegInfo *reg = cfg_entry->reg;
 175    uint8_t writable_mask = 0;
 176    uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
 177    uint8_t *data = cfg_entry->ptr.byte;
 178
 179    /* modify emulate register */
 180    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
 181    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 182
 183    /* create value for writing to I/O device register */
 184    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
 185                              throughable_mask);
 186
 187    return 0;
 188}
 189static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 190                                 uint16_t *val, uint16_t dev_value,
 191                                 uint16_t valid_mask)
 192{
 193    XenPTRegInfo *reg = cfg_entry->reg;
 194    uint16_t writable_mask = 0;
 195    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
 196    uint16_t *data = cfg_entry->ptr.half_word;
 197
 198    /* modify emulate register */
 199    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
 200    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 201
 202    /* create value for writing to I/O device register */
 203    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
 204                              throughable_mask);
 205
 206    return 0;
 207}
 208static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 209                                 uint32_t *val, uint32_t dev_value,
 210                                 uint32_t valid_mask)
 211{
 212    XenPTRegInfo *reg = cfg_entry->reg;
 213    uint32_t writable_mask = 0;
 214    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
 215    uint32_t *data = cfg_entry->ptr.word;
 216
 217    /* modify emulate register */
 218    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
 219    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 220
 221    /* create value for writing to I/O device register */
 222    *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
 223                              throughable_mask);
 224
 225    return 0;
 226}
 227
 228
 229/* XenPTRegInfo declaration
 230 * - only for emulated register (either a part or whole bit).
 231 * - for passthrough register that need special behavior (like interacting with
 232 *   other component), set emu_mask to all 0 and specify r/w func properly.
 233 * - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
 234 */
 235
 236/********************
 237 * Header Type0
 238 */
 239
/* Vendor ID init: expose the physical device's vendor ID to the guest. */
static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.vendor_id;
    return 0;
}
/* Device ID init: expose the physical device's device ID to the guest. */
static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = s->real_device.device_id;
    return 0;
}
 254static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
 255                                  XenPTRegInfo *reg, uint32_t real_offset,
 256                                  uint32_t *data)
 257{
 258    XenPTRegGroup *reg_grp_entry = NULL;
 259    XenPTReg *reg_entry = NULL;
 260    uint32_t reg_field = 0;
 261
 262    /* find Header register group */
 263    reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
 264    if (reg_grp_entry) {
 265        /* find Capabilities Pointer register */
 266        reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
 267        if (reg_entry) {
 268            /* check Capabilities Pointer register */
 269            if (*reg_entry->ptr.half_word) {
 270                reg_field |= PCI_STATUS_CAP_LIST;
 271            } else {
 272                reg_field &= ~PCI_STATUS_CAP_LIST;
 273            }
 274        } else {
 275            xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
 276                                     " for Capabilities Pointer register."
 277                                     " (%s)\n", __func__);
 278            return -1;
 279        }
 280    } else {
 281        xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
 282                                 " for Header. (%s)\n", __func__);
 283        return -1;
 284    }
 285
 286    *data = reg_field;
 287    return 0;
 288}
/* Header Type init */
static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
                                       XenPTRegInfo *reg, uint32_t real_offset,
                                       uint32_t *data)
{
    /* force the multi-function bit (0x80) on top of the table's init_val */
    *data = reg->init_val | 0x80;
    return 0;
}
 297
/* initialize Interrupt Pin register from the host device's INTx routing */
static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
                                  XenPTRegInfo *reg, uint32_t real_offset,
                                  uint32_t *data)
{
    *data = xen_pt_pci_read_intx(s);
    return 0;
}
 306
 307/* Command register */
 308static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 309                                uint16_t *val, uint16_t dev_value,
 310                                uint16_t valid_mask)
 311{
 312    XenPTRegInfo *reg = cfg_entry->reg;
 313    uint16_t writable_mask = 0;
 314    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
 315    uint16_t *data = cfg_entry->ptr.half_word;
 316
 317    /* modify emulate register */
 318    writable_mask = ~reg->ro_mask & valid_mask;
 319    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 320
 321    /* create value for writing to I/O device register */
 322    if (*val & PCI_COMMAND_INTX_DISABLE) {
 323        throughable_mask |= PCI_COMMAND_INTX_DISABLE;
 324    } else {
 325        if (s->machine_irq) {
 326            throughable_mask |= PCI_COMMAND_INTX_DISABLE;
 327        }
 328    }
 329
 330    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
 331
 332    return 0;
 333}
 334
 335/* BAR */
 336#define XEN_PT_BAR_MEM_RO_MASK    0x0000000F  /* BAR ReadOnly mask(Memory) */
 337#define XEN_PT_BAR_MEM_EMU_MASK   0xFFFFFFF0  /* BAR emul mask(Memory) */
 338#define XEN_PT_BAR_IO_RO_MASK     0x00000003  /* BAR ReadOnly mask(I/O) */
 339#define XEN_PT_BAR_IO_EMU_MASK    0xFFFFFFFC  /* BAR emul mask(I/O) */
 340
 341static bool is_64bit_bar(PCIIORegion *r)
 342{
 343    return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
 344}
 345
 346static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
 347{
 348    if (is_64bit_bar(r)) {
 349        uint64_t size64;
 350        size64 = (r + 1)->size;
 351        size64 <<= 32;
 352        size64 += r->size;
 353        return size64;
 354    }
 355    return r->size;
 356}
 357
/* Classify BAR at 'index': upper half of a 64-bit BAR, unused, I/O or memory.
 * The result drives which emulation masks the BAR read/write handlers use. */
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR: if the PREVIOUS region is a 64-bit memory BAR (and
     * is not itself already an upper half), this slot is its upper half */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR: zero size means the BAR is not implemented */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR: always treated as memory */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator on the host device */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}
 396
 397static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
 398{
 399    if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
 400        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
 401    } else {
 402        return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
 403    }
 404}
 405
 406static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
 407                               uint32_t real_offset, uint32_t *data)
 408{
 409    uint32_t reg_field = 0;
 410    int index;
 411
 412    index = xen_pt_bar_offset_to_index(reg->offset);
 413    if (index < 0 || index >= PCI_NUM_REGIONS) {
 414        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
 415        return -1;
 416    }
 417
 418    /* set BAR flag */
 419    s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
 420    if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
 421        reg_field = XEN_PT_INVALID_REG;
 422    }
 423
 424    *data = reg_field;
 425    return 0;
 426}
/* BAR register read: report the kernel-fixed-up host address, with the
 * emulated address bits (guest-programmed) merged in. */
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                               uint32_t *value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t valid_emu_mask = 0;
    uint32_t bar_emu_mask = 0;
    int index;

    /* get BAR index; the upper bound is PCI_NUM_REGIONS - 1 because this
     * handler is only installed for the six base BARs, not the ROM BAR */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    /* use fixed-up value from kernel sysfs */
    *value = base_address_with_flags(&s->real_device.io_regions[index]);

    /* set emulate mask depend on BAR flag: address bits are emulated for
     * MEM/IO BARs, the whole register for the upper half of a 64-bit BAR */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        break;
    default:
        break;
    }

    /* emulate BAR */
    valid_emu_mask = bar_emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);

    return 0;
}
 466static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
 467                                uint32_t *val, uint32_t dev_value,
 468                                uint32_t valid_mask)
 469{
 470    XenPTRegInfo *reg = cfg_entry->reg;
 471    XenPTRegion *base = NULL;
 472    PCIDevice *d = &s->dev;
 473    const PCIIORegion *r;
 474    uint32_t writable_mask = 0;
 475    uint32_t bar_emu_mask = 0;
 476    uint32_t bar_ro_mask = 0;
 477    uint32_t r_size = 0;
 478    int index = 0;
 479    uint32_t *data = cfg_entry->ptr.word;
 480
 481    index = xen_pt_bar_offset_to_index(reg->offset);
 482    if (index < 0 || index >= PCI_NUM_REGIONS) {
 483        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
 484        return -1;
 485    }
 486
 487    r = &d->io_regions[index];
 488    base = &s->bases[index];
 489    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);
 490
 491    /* set emulate mask and read-only mask values depend on the BAR flag */
 492    switch (s->bases[index].bar_flag) {
 493    case XEN_PT_BAR_FLAG_MEM:
 494        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
 495        if (!r_size) {
 496            /* low 32 bits mask for 64 bit bars */
 497            bar_ro_mask = XEN_PT_BAR_ALLF;
 498        } else {
 499            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
 500        }
 501        break;
 502    case XEN_PT_BAR_FLAG_IO:
 503        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
 504        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
 505        break;
 506    case XEN_PT_BAR_FLAG_UPPER:
 507        bar_emu_mask = XEN_PT_BAR_ALLF;
 508        bar_ro_mask = r_size ? r_size - 1 : 0;
 509        break;
 510    default:
 511        break;
 512    }
 513
 514    /* modify emulate register */
 515    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
 516    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
 517
 518    /* check whether we need to update the virtual region address or not */
 519    switch (s->bases[index].bar_flag) {
 520    case XEN_PT_BAR_FLAG_UPPER:
 521    case XEN_PT_BAR_FLAG_MEM:
 522        /* nothing to do */
 523        break;
 524    case XEN_PT_BAR_FLAG_IO:
 525        /* nothing to do */
 526        break;
 527    default:
 528        break;
 529    }
 530
 531    /* create value for writing to I/O device register */
 532    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
 533
 534    return 0;
 535}
 536
/* write Exp ROM BAR: like a normal BAR the address bits inside the ROM's
 * alignment are read-only, but the enable bit is writable and the
 * throughable bits are forwarded to the device. */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask: address bits below the ROM size
     * are read-only, but PCI_ROM_ADDRESS_ENABLE stays writable */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
 568
/* Intel IGD OpRegion read: value comes from the IGD OpRegion helper, the
 * emulated register storage is not consulted. */
static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}
 576
/* Intel IGD OpRegion write: forwarded verbatim to the IGD OpRegion helper;
 * dev_value and valid_mask are intentionally ignored. */
static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}
 584
/* Header Type0 reg static information table.
 * Mask semantics (see the generic read/write handlers above):
 *   ro_mask   - bits the guest cannot modify
 *   emu_mask  - bits served from the emulated copy on reads
 *   res_mask  - reserved bits, blocked unless the device is permissive
 *   rw1c_mask - write-1-to-clear bits, masked out of old device values
 */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so need to be declared after Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg: fully read-only, init forces the multifunction bit */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* mask of BAR need to be decided later, depends on IO/MEM type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
 772
 773
 774/*********************************
 775 * Vital Product Data Capability
 776 */
 777
/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    /* Next Capability pointer: fully emulated so hidden caps can be skipped */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* VPD Address register: low two bits are read-only and emulated */
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
 803
 804
 805/**************************************
 806 * Vendor Specific Capability
 807 */
 808
/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    /* Next Capability pointer: fully emulated so hidden caps can be skipped */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
 825
 826
 827/*****************************
 828 * PCI Express Capability
 829 */
 830
 831static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
 832                                             uint32_t offset)
 833{
 834    uint8_t flag;
 835    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
 836        return 0;
 837    }
 838    return flag & PCI_EXP_FLAGS_VERS;
 839}
 840
 841static inline uint8_t get_device_type(XenPCIPassthroughState *s,
 842                                      uint32_t offset)
 843{
 844    uint8_t flag;
 845    if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
 846        return 0;
 847    }
 848    return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
 849}
 850
 851/* initialize Link Control register */
 852static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
 853                                    XenPTRegInfo *reg, uint32_t real_offset,
 854                                    uint32_t *data)
 855{
 856    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
 857    uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
 858
 859    /* no need to initialize in case of Root Complex Integrated Endpoint
 860     * with cap_ver 1.x
 861     */
 862    if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
 863        *data = XEN_PT_INVALID_REG;
 864    }
 865
 866    *data = reg->init_val;
 867    return 0;
 868}
 869/* initialize Device Control 2 register */
 870static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
 871                                    XenPTRegInfo *reg, uint32_t real_offset,
 872                                    uint32_t *data)
 873{
 874    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
 875
 876    /* no need to initialize in case of cap_ver 1.x */
 877    if (cap_ver == 1) {
 878        *data = XEN_PT_INVALID_REG;
 879    }
 880
 881    *data = reg->init_val;
 882    return 0;
 883}
 884/* initialize Link Control 2 register */
 885static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
 886                                     XenPTRegInfo *reg, uint32_t real_offset,
 887                                     uint32_t *data)
 888{
 889    uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
 890    uint32_t reg_field = 0;
 891
 892    /* no need to initialize in case of cap_ver 1.x */
 893    if (cap_ver == 1) {
 894        reg_field = XEN_PT_INVALID_REG;
 895    } else {
 896        /* set Supported Link Speed */
 897        uint8_t lnkcap;
 898        int rc;
 899        rc = xen_host_pci_get_byte(&s->real_device,
 900                                   real_offset - reg->offset + PCI_EXP_LNKCAP,
 901                                   &lnkcap);
 902        if (rc) {
 903            return rc;
 904        }
 905        reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
 906    }
 907
 908    *data = reg_field;
 909    return 0;
 910}
 911
/* PCI Express Capability Structure reg static information table.
 * Masks: ro_mask = bits the guest may not change, emu_mask = bits served
 * from the emulated copy, res_mask = reserved bits, rw1c_mask = bits
 * cleared by writing 1.
 */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        /* fully emulated: points at the next capability exposed to the guest */
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        /* only the Function Level Reset capability bit is emulated */
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset     = PCI_EXP_DEVSTA,
        .size       = 2,
        .res_mask   = 0xFFC0,
        .ro_mask    = 0x0030,
        /* error-detected bits are write-1-to-clear */
        .rw1c_mask  = 0x000F,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset     = PCI_EXP_LNKSTA,
        .size       = 2,
        .ro_mask    = 0x3FFF,
        .rw1c_mask  = 0xC000,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg (PCI_EXP_DEVCTL2; only valid for cap ver >= 2) */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFE0,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg (PCI_EXP_LNKCTL2; only valid for cap ver >= 2) */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
1005
1006
1007/*********************************
1008 * Power Management Capability
1009 */
1010
/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        /* fully emulated: points at the next capability exposed to the guest */
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .res_mask   = 0x00F0,
        .ro_mask    = 0x610C,
        /* PME status is write-1-to-clear */
        .rw1c_mask  = 0x8000,
        .emu_mask   = 0x810B,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
1052
1053
1054/********************************
1055 * MSI Capability
1056 */
1057
/* Helper */
/* True iff @offset is the location of the MSI register named @what
 * (ADDRESS/DATA/MASK/PENDING) under the layout selected by the
 * 64-bit flag in @flags. */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
1062
/* Message Control register: seed MSI state from the physical device and
 * make sure MSI starts disabled on the hardware. */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* the physical device must start with MSI off so the guest controls
     * when (and with which address/data) it becomes enabled */
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    /* cache the hardware capability bits (64-bit, per-vector mask, ...) */
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
/* Handle a guest write to the MSI Message Control register: update the
 * emulated copy and enable/disable the physical MSI binding accordingly.
 * Setup errors are contained here (enable bit stripped) so QEMU keeps
 * running; the guest just sees MSI as non-functional. */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* mirror everything except the enable bit into the cached MSI flags */
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        /* guest cleared the enable bit: tear down the pirq binding */
        xen_pt_msi_disable(s);
    }

    return 0;
}
1144
1145/* initialize Message Upper Address register */
1146static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
1147                                     XenPTRegInfo *reg, uint32_t real_offset,
1148                                     uint32_t *data)
1149{
1150    /* no need to initialize in case of 32 bit type */
1151    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
1152        *data = XEN_PT_INVALID_REG;
1153    } else {
1154        *data = reg->init_val;
1155    }
1156
1157    return 0;
1158}
1159/* this function will be called twice (for 32 bit and 64 bit type) */
1160/* initialize Message Data register */
1161static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
1162                                   XenPTRegInfo *reg, uint32_t real_offset,
1163                                   uint32_t *data)
1164{
1165    uint32_t flags = s->msi->flags;
1166    uint32_t offset = reg->offset;
1167
1168    /* check the offset whether matches the type or not */
1169    if (xen_pt_msi_check_type(offset, flags, DATA)) {
1170        *data = reg->init_val;
1171    } else {
1172        *data = XEN_PT_INVALID_REG;
1173    }
1174    return 0;
1175}
1176
1177/* this function will be called twice (for 32 bit and 64 bit type) */
1178/* initialize Mask register */
1179static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
1180                                XenPTRegInfo *reg, uint32_t real_offset,
1181                                uint32_t *data)
1182{
1183    uint32_t flags = s->msi->flags;
1184
1185    /* check the offset whether matches the type or not */
1186    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
1187        *data = XEN_PT_INVALID_REG;
1188    } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
1189        *data = reg->init_val;
1190    } else {
1191        *data = XEN_PT_INVALID_REG;
1192    }
1193    return 0;
1194}
1195
1196/* this function will be called twice (for 32 bit and 64 bit type) */
1197/* initialize Pending register */
1198static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
1199                                   XenPTRegInfo *reg, uint32_t real_offset,
1200                                   uint32_t *data)
1201{
1202    uint32_t flags = s->msi->flags;
1203
1204    /* check the offset whether matches the type or not */
1205    if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
1206        *data = XEN_PT_INVALID_REG;
1207    } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
1208        *data = reg->init_val;
1209    } else {
1210        *data = XEN_PT_INVALID_REG;
1211    }
1212    return 0;
1213}
1214
1215/* write Message Address register */
1216static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
1217                                      XenPTReg *cfg_entry, uint32_t *val,
1218                                      uint32_t dev_value, uint32_t valid_mask)
1219{
1220    XenPTRegInfo *reg = cfg_entry->reg;
1221    uint32_t writable_mask = 0;
1222    uint32_t old_addr = *cfg_entry->ptr.word;
1223    uint32_t *data = cfg_entry->ptr.word;
1224
1225    /* modify emulate register */
1226    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1227    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
1228    s->msi->addr_lo = *data;
1229
1230    /* create value for writing to I/O device register */
1231    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1232
1233    /* update MSI */
1234    if (*data != old_addr) {
1235        if (s->msi->mapped) {
1236            xen_pt_msi_update(s);
1237        }
1238    }
1239
1240    return 0;
1241}
1242/* write Message Upper Address register */
1243static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
1244                                      XenPTReg *cfg_entry, uint32_t *val,
1245                                      uint32_t dev_value, uint32_t valid_mask)
1246{
1247    XenPTRegInfo *reg = cfg_entry->reg;
1248    uint32_t writable_mask = 0;
1249    uint32_t old_addr = *cfg_entry->ptr.word;
1250    uint32_t *data = cfg_entry->ptr.word;
1251
1252    /* check whether the type is 64 bit or not */
1253    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
1254        XEN_PT_ERR(&s->dev,
1255                   "Can't write to the upper address without 64 bit support\n");
1256        return -1;
1257    }
1258
1259    /* modify emulate register */
1260    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1261    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
1262    /* update the msi_info too */
1263    s->msi->addr_hi = *data;
1264
1265    /* create value for writing to I/O device register */
1266    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1267
1268    /* update MSI */
1269    if (*data != old_addr) {
1270        if (s->msi->mapped) {
1271            xen_pt_msi_update(s);
1272        }
1273    }
1274
1275    return 0;
1276}
1277
1278
1279/* this function will be called twice (for 32 bit and 64 bit type) */
1280/* write Message Data register */
1281static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
1282                                    XenPTReg *cfg_entry, uint16_t *val,
1283                                    uint16_t dev_value, uint16_t valid_mask)
1284{
1285    XenPTRegInfo *reg = cfg_entry->reg;
1286    XenPTMSI *msi = s->msi;
1287    uint16_t writable_mask = 0;
1288    uint16_t old_data = *cfg_entry->ptr.half_word;
1289    uint32_t offset = reg->offset;
1290    uint16_t *data = cfg_entry->ptr.half_word;
1291
1292    /* check the offset whether matches the type or not */
1293    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
1294        /* exit I/O emulator */
1295        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
1296        return -1;
1297    }
1298
1299    /* modify emulate register */
1300    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1301    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
1302    /* update the msi_info too */
1303    msi->data = *data;
1304
1305    /* create value for writing to I/O device register */
1306    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1307
1308    /* update MSI */
1309    if (*data != old_data) {
1310        if (msi->mapped) {
1311            xen_pt_msi_update(s);
1312        }
1313    }
1314
1315    return 0;
1316}
1317
1318static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
1319                                 uint32_t *val, uint32_t dev_value,
1320                                 uint32_t valid_mask)
1321{
1322    int rc;
1323
1324    rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask);
1325    if (rc) {
1326        return rc;
1327    }
1328
1329    s->msi->mask = *val;
1330
1331    return 0;
1332}
1333
/* MSI Capability Structure reg static information table.
 * Note the 32-bit and 64-bit register variants are both listed; each
 * entry's init hook drops the variant that does not match the device's
 * actual layout (see xen_pt_msi_check_type).
 */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        /* fully emulated: points at the next capability exposed to the guest */
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xFE00,
        .ro_mask    = 0x018E,
        .emu_mask   = 0x017E,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        /* the enable bit triggers pirq setup/teardown */
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        .emu_mask   = 0xFFFFFFFF,
        .init       = xen_pt_mask_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset     = PCI_MSI_MASK_32 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        /* read-only: always read from the physical device */
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset     = PCI_MSI_MASK_64 + 4,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0xFFFFFFFF,
        /* read-only: always read from the physical device */
        .emu_mask   = 0x00000000,
        .init       = xen_pt_pending_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
1451
1452
1453/**************************************
1454 * MSI-X Capability
1455 */
1456
1457/* Message Control register for MSI-X */
1458static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
1459                                    XenPTRegInfo *reg, uint32_t real_offset,
1460                                    uint32_t *data)
1461{
1462    uint16_t reg_field;
1463    int rc;
1464
1465    /* use I/O device register's value as initial value */
1466    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
1467    if (rc) {
1468        return rc;
1469    }
1470    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
1471        XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
1472        xen_host_pci_set_word(&s->real_device, real_offset,
1473                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
1474    }
1475
1476    s->msix->ctrl_offset = real_offset;
1477
1478    *data = reg->init_val;
1479    return 0;
1480}
/* Handle a guest write to the MSI-X Message Control register: merge the
 * emulated copy, then enable/disable the physical MSI-X binding to match
 * the new enable/mask-all state. */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X: bind when enabled and not globally masked,
     * tear down when the guest disables it */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    /* log only on enable/disable transitions */
    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}
1517
/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        /* fully emulated: points at the next capability exposed to the guest */
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x3800,
        .ro_mask    = 0x07FF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        /* enable/mask-all bits drive MSI-X binding */
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
1547
/* Intel IGD OpRegion reg static information table: a single fully
 * emulated dword handled by dedicated read/write hooks. */
static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset     = 0x0,
        .size       = 4,
        .init_val   = 0,
        .emu_mask   = 0xFFFFFFFF,
        .u.dw.read   = xen_pt_intel_opregion_read,
        .u.dw.write  = xen_pt_intel_opregion_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
1562
1563/****************************
1564 * Capabilities
1565 */
1566
1567/* capability structure register group size functions */
1568
/* default size hook for fixed-size groups: the size comes straight from
 * the static table entry */
static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    /* the byte at offset 2 of the vendor capability holds its length */
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
/* get PCI Express Capability Structure register group size.
 * The structure's length is not self-describing; it is derived from the
 * capability version and the device/port type per the PCIe spec.  Only
 * endpoint types are supported for passthrough.
 */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;


    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
            /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
            /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
1655/* get MSI Capability Structure register group size */
1656static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
1657                                const XenPTRegGroupInfo *grp_reg,
1658                                uint32_t base_offset, uint8_t *size)
1659{
1660    uint16_t msg_ctrl = 0;
1661    uint8_t msi_size = 0xa;
1662    int rc;
1663
1664    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
1665                               &msg_ctrl);
1666    if (rc) {
1667        return rc;
1668    }
1669    /* check if 64-bit address is capable of per-vector masking */
1670    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
1671        msi_size += 4;
1672    }
1673    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
1674        msi_size += 10;
1675    }
1676
1677    s->msi = g_new0(XenPTMSI, 1);
1678    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
1679
1680    *size = msi_size;
1681    return 0;
1682}
/* get MSI-X Capability Structure register group size.
 * Also performs MSI-X state setup (table mapping) via xen_pt_msix_init;
 * the group size itself is fixed. */
static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    int rc = 0;

    rc = xen_pt_msix_init(s, base_offset);

    if (rc < 0) {
        XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
        return rc;
    }

    *size = grp_reg->grp_size;
    return 0;
}
1700
1701
/* Master table of config-space register groups.
 * EMU groups are (partially) emulated via their emu_regs table;
 * HARDWIRED groups are hidden from the guest (read as zero).
 * grp_size 0xFF means the size is determined at runtime by size_init. */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id      = 0xFF,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x40,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id      = PCI_CAP_ID_PM,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = PCI_PM_SIZEOF,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_VPD,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x08,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id     = PCI_CAP_ID_SLOTID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x04,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_MSI,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        /* variable size: depends on 64-bit and mask-bit support */
        .grp_size    = 0xFF,
        .size_init   = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_PCIX,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x18,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_VNDR,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        /* variable size: read from the capability's length byte */
        .grp_size    = 0xFF,
        .size_init   = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SHPC,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id     = PCI_CAP_ID_SSVID,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x08,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id     = PCI_CAP_ID_AGP3,
        .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size   = 0x30,
        .size_init  = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_EXP,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        /* variable size: depends on capability version and device type */
        .grp_size    = 0xFF,
        .size_init   = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_MSIX,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x0C,
        .size_init   = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id      = XEN_PCI_INTEL_OPREGION,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x4,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs    = xen_pt_emu_reg_igd_opregion,
    },
    /* table terminator */
    {
        .grp_size = 0,
    },
};
1813
1814/* initialize Capabilities Pointer or Next Pointer register */
1815static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
1816                               XenPTRegInfo *reg, uint32_t real_offset,
1817                               uint32_t *data)
1818{
1819    int i, rc;
1820    uint8_t reg_field;
1821    uint8_t cap_id = 0;
1822
1823    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
1824    if (rc) {
1825        return rc;
1826    }
1827    /* find capability offset */
1828    while (reg_field) {
1829        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
1830            if (xen_pt_hide_dev_cap(&s->real_device,
1831                                    xen_pt_emu_reg_grps[i].grp_id)) {
1832                continue;
1833            }
1834
1835            rc = xen_host_pci_get_byte(&s->real_device,
1836                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
1837            if (rc) {
1838                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
1839                           reg_field + PCI_CAP_LIST_ID, rc);
1840                return rc;
1841            }
1842            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
1843                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
1844                    goto out;
1845                }
1846                /* ignore the 0 hardwired capability, find next one */
1847                break;
1848            }
1849        }
1850
1851        /* next capability */
1852        rc = xen_host_pci_get_byte(&s->real_device,
1853                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
1854        if (rc) {
1855            return rc;
1856        }
1857    }
1858
1859out:
1860    *data = reg_field;
1861    return 0;
1862}
1863
1864
1865/*************
1866 * Main
1867 */
1868
1869static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
1870{
1871    uint8_t id;
1872    unsigned max_cap = XEN_PCI_CAP_MAX;
1873    uint8_t pos = PCI_CAPABILITY_LIST;
1874    uint8_t status = 0;
1875
1876    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
1877        return 0;
1878    }
1879    if ((status & PCI_STATUS_CAP_LIST) == 0) {
1880        return 0;
1881    }
1882
1883    while (max_cap--) {
1884        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
1885            break;
1886        }
1887        if (pos < PCI_CONFIG_HEADER_SIZE) {
1888            break;
1889        }
1890
1891        pos &= ~3;
1892        if (xen_host_pci_get_byte(&s->real_device,
1893                                  pos + PCI_CAP_LIST_ID, &id)) {
1894            break;
1895        }
1896
1897        if (id == 0xff) {
1898            break;
1899        }
1900        if (id == cap) {
1901            return pos;
1902        }
1903
1904        pos += PCI_CAP_LIST_NEXT;
1905    }
1906    return 0;
1907}
1908
1909static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
1910                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
1911                                   Error **errp)
1912{
1913    XenPTReg *reg_entry;
1914    uint32_t data = 0;
1915    int rc = 0;
1916
1917    reg_entry = g_new0(XenPTReg, 1);
1918    reg_entry->reg = reg;
1919
1920    if (reg->init) {
1921        uint32_t host_mask, size_mask;
1922        unsigned int offset;
1923        uint32_t val;
1924
1925        /* initialize emulate register */
1926        rc = reg->init(s, reg_entry->reg,
1927                       reg_grp->base_offset + reg->offset, &data);
1928        if (rc < 0) {
1929            g_free(reg_entry);
1930            error_setg(errp, "Init emulate register fail");
1931            return;
1932        }
1933        if (data == XEN_PT_INVALID_REG) {
1934            /* free unused BAR register entry */
1935            g_free(reg_entry);
1936            return;
1937        }
1938        /* Sync up the data to dev.config */
1939        offset = reg_grp->base_offset + reg->offset;
1940        size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);
1941
1942        switch (reg->size) {
1943        case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
1944                break;
1945        case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
1946                break;
1947        case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
1948                break;
1949        default: abort();
1950        }
1951        if (rc) {
1952            /* Serious issues when we cannot read the host values! */
1953            g_free(reg_entry);
1954            error_setg(errp, "Cannot read host values");
1955            return;
1956        }
1957        /* Set bits in emu_mask are the ones we emulate. The dev.config shall
1958         * contain the emulated view of the guest - therefore we flip the mask
1959         * to mask out the host values (which dev.config initially has) . */
1960        host_mask = size_mask & ~reg->emu_mask;
1961
1962        if ((data & host_mask) != (val & host_mask)) {
1963            uint32_t new_val;
1964
1965            /* Mask out host (including past size). */
1966            new_val = val & host_mask;
1967            /* Merge emulated ones (excluding the non-emulated ones). */
1968            new_val |= data & host_mask;
1969            /* Leave intact host and emulated values past the size - even though
1970             * we do not care as we write per reg->size granularity, but for the
1971             * logging below lets have the proper value. */
1972            new_val |= ((val | data)) & ~size_mask;
1973            XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
1974                       offset, data, val, new_val);
1975            val = new_val;
1976        } else
1977            val = data;
1978
1979        if (val & ~size_mask) {
1980            error_setg(errp, "Offset 0x%04x:0x%04x expands past"
1981                    " register size (%d)", offset, val, reg->size);
1982            g_free(reg_entry);
1983            return;
1984        }
1985        /* This could be just pci_set_long as we don't modify the bits
1986         * past reg->size, but in case this routine is run in parallel or the
1987         * init value is larger, we do not want to over-write registers. */
1988        switch (reg->size) {
1989        case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
1990                break;
1991        case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
1992                break;
1993        case 4: pci_set_long(s->dev.config + offset, val);
1994                break;
1995        default: abort();
1996        }
1997        /* set register value pointer to the data. */
1998        reg_entry->ptr.byte = s->dev.config + offset;
1999
2000    }
2001    /* list add register entry */
2002    QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
2003}
2004
2005void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
2006{
2007    int i, rc;
2008    Error *err = NULL;
2009
2010    QLIST_INIT(&s->reg_grps);
2011
2012    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
2013        uint32_t reg_grp_offset = 0;
2014        XenPTRegGroup *reg_grp_entry = NULL;
2015
2016        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
2017            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
2018            if (xen_pt_hide_dev_cap(&s->real_device,
2019                                    xen_pt_emu_reg_grps[i].grp_id)) {
2020                continue;
2021            }
2022
2023            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
2024
2025            if (!reg_grp_offset) {
2026                continue;
2027            }
2028        }
2029
2030        /*
2031         * By default we will trap up to 0x40 in the cfg space.
2032         * If an intel device is pass through we need to trap 0xfc,
2033         * therefore the size should be 0xff.
2034         */
2035        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
2036            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
2037        }
2038
2039        reg_grp_entry = g_new0(XenPTRegGroup, 1);
2040        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
2041        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
2042
2043        reg_grp_entry->base_offset = reg_grp_offset;
2044        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
2045        if (xen_pt_emu_reg_grps[i].size_init) {
2046            /* get register group size */
2047            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
2048                                                  reg_grp_offset,
2049                                                  &reg_grp_entry->size);
2050            if (rc < 0) {
2051                error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x,"
2052                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
2053                           xen_pt_emu_reg_grps[i].grp_type, rc);
2054                error_propagate(errp, err);
2055                xen_pt_config_delete(s);
2056                return;
2057            }
2058        }
2059
2060        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
2061            if (xen_pt_emu_reg_grps[i].emu_regs) {
2062                int j = 0;
2063                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
2064
2065                /* initialize capability register */
2066                for (j = 0; regs->size != 0; j++, regs++) {
2067                    xen_pt_config_reg_init(s, reg_grp_entry, regs, &err);
2068                    if (err) {
2069                        error_append_hint(&err, "Failed to init register %d"
2070                                " offsets 0x%x in grp_type = 0x%x (%d/%zu)", j,
2071                                regs->offset, xen_pt_emu_reg_grps[i].grp_type,
2072                                i, ARRAY_SIZE(xen_pt_emu_reg_grps));
2073                        error_propagate(errp, err);
2074                        xen_pt_config_delete(s);
2075                        return;
2076                    }
2077                }
2078            }
2079        }
2080    }
2081}
2082
2083/* delete all emulate register */
2084void xen_pt_config_delete(XenPCIPassthroughState *s)
2085{
2086    struct XenPTRegGroup *reg_group, *next_grp;
2087    struct XenPTReg *reg, *next_reg;
2088
2089    /* free MSI/MSI-X info table */
2090    if (s->msix) {
2091        xen_pt_msix_unmap(s);
2092    }
2093    g_free(s->msi);
2094
2095    /* free all register group entry */
2096    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
2097        /* free all register entry */
2098        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
2099            QLIST_REMOVE(reg, entries);
2100            g_free(reg);
2101        }
2102
2103        QLIST_REMOVE(reg_group, entries);
2104        g_free(reg_group);
2105    }
2106}
2107