linux/arch/x86/pci/xen.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook into the pcibios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *           Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *           Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>

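/*
 * INTx setup for a PV domU: pcifront's backend stores the PIRQ in the
 * device's PCI_INTERRUPT_LINE register, so read it back and bind it to
 * a Linux IRQ. Legacy (ISA-range) IRQs are bound exclusively; everything
 * else is shareable.
 */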
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
        int rc;
        int share = 1;
        int pirq;
        u8 gsi;

        rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
        if (rc < 0) {
                dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
                         rc);
                return rc;
        }
        /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
        pirq = gsi;

        if (gsi < nr_legacy_irqs())
                share = 0;

        rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
        if (rc < 0) {
                dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
                         gsi, pirq, rc);
                return rc;
        }

        dev->irq = rc;
        dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
        return 0;
}

#ifdef CONFIG_ACPI
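/*
 * Map a GSI to a PIRQ via PHYSDEVOP_map_pirq and bind the result to a
 * Linux IRQ. With set_pirq the PIRQ number is forced to equal the GSI
 * (used by the initial domain); otherwise the hypervisor picks one.
 */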
static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
        int rc, pirq = -1, irq;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;

        irq = xen_irq_from_gsi(gsi);
        if (irq > 0)
                return irq;

        if (set_pirq)
                pirq = gsi;

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
        map_irq.pirq = pirq;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);
                return -1;
        }

        if (triggering == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
                shareable = 1;
                name = "ioapic-level";
        }

        irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
        if (irq < 0)
                goto out;

        printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
        return irq;
}

static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                     int trigger, int polarity)
{
        if (!xen_hvm_domain())
                return -1;

        return xen_register_pirq(gsi, trigger,
                                 false /* no mapping of GSI to PIRQ */);
}

#ifdef CONFIG_XEN_DOM0
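/*
 * Dom0 GSI registration: map and bind the GSI as above, then report its
 * trigger mode and polarity via PHYSDEVOP_setup_gsi so the hypervisor,
 * which owns the IO-APIC, can program the pin accordingly.
 */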
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
        int rc, irq;
        struct physdev_setup_gsi setup_gsi;

        if (!xen_pv_domain())
                return -1;

        printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
                        gsi, triggering, polarity);

        irq = xen_register_pirq(gsi, triggering, true);

        setup_gsi.gsi = gsi;
        setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
        setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
        if (rc == -EEXIST)
                printk(KERN_INFO "GSI %u is already set up\n", gsi);
        else if (rc) {
                printk(KERN_ERR "Failed to set up GSI %u, err_code: %d\n",
                                gsi, rc);
        }

        return irq;
}

static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
                                 int trigger, int polarity)
{
        return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif

#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
#include <asm/msidef.h>

struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);

struct xen_msi_ops {
        int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
        void (*teardown_msi_irqs)(struct pci_dev *dev);
};

static struct xen_msi_ops xen_msi_ops __ro_after_init;

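/*
 * MSI/MSI-X setup for a PV domU: ask the pcifront backend to enable
 * MSI(-X) and hand back one PIRQ per vector, then bind each PIRQ to a
 * Linux IRQ. Returning 1 for multi-MSI tells the PCI core to retry
 * with a single vector.
 */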
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, ret, i;
        struct msi_desc *msidesc;
        int *v;

        if (type == PCI_CAP_ID_MSI && nvec > 1)
                return 1;

        v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
        if (!v)
                return -ENOMEM;

        if (type == PCI_CAP_ID_MSIX)
                ret = xen_pci_frontend_enable_msix(dev, v, nvec);
        else
                ret = xen_pci_frontend_enable_msi(dev, v);
        if (ret)
                goto error;
        i = 0;
        for_each_pci_msi_entry(msidesc, dev) {
                irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
                                               (type == PCI_CAP_ID_MSI) ? nvec : 1,
                                               (type == PCI_CAP_ID_MSIX) ?
                                               "pcifront-msi-x" :
                                               "pcifront-msi",
                                                DOMID_SELF);
                if (irq < 0) {
                        ret = irq;
                        goto free;
                }
                i++;
        }
        kfree(v);
        return 0;

error:
        if (ret == -ENOSYS)
                dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
        else if (ret)
                dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
        kfree(v);
        return ret;
}

#define XEN_PIRQ_MSI_DATA  (MSI_DATA_TRIGGER_EDGE | \
                MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))

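/*
 * Note: the (3 << 8) in XEN_PIRQ_MSI_DATA above sits in the MSI data
 * delivery-mode field (bits 10:8); presumably a value the hypervisor
 * recognizes when remapping the message to a PIRQ (an assumption, not
 * verified here).
 */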
static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
                struct msi_msg *msg)
{
        /*
         * We set vector == 0 to tell the hypervisor we don't care about
         * it, but we want a pirq set up instead. We use the dest_id
         * field to pass the pirq that we want.
         */
        msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
        msg->address_lo =
                MSI_ADDR_BASE_LO |
                MSI_ADDR_DEST_MODE_PHYSICAL |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID(pirq);

        msg->data = XEN_PIRQ_MSI_DATA;
}

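/*
 * MSI/MSI-X setup for an HVM domain: allocate a PIRQ from the
 * hypervisor for each vector, write the PIRQ-encoding message into the
 * device so the hypervisor can intercept and remap it, then bind the
 * PIRQ to a Linux IRQ.
 */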
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, pirq;
        struct msi_desc *msidesc;
        struct msi_msg msg;

        if (type == PCI_CAP_ID_MSI && nvec > 1)
                return 1;

        for_each_pci_msi_entry(msidesc, dev) {
                pirq = xen_allocate_pirq_msi(dev, msidesc);
                if (pirq < 0) {
                        irq = -ENODEV;
                        goto error;
                }
                xen_msi_compose_msg(dev, pirq, &msg);
                __pci_write_msi_msg(msidesc, &msg);
                dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
                irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
                                               (type == PCI_CAP_ID_MSI) ? nvec : 1,
                                               (type == PCI_CAP_ID_MSIX) ?
                                               "msi-x" : "msi",
                                               DOMID_SELF);
                if (irq < 0)
                        goto error;
                dev_dbg(&dev->dev,
                        "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
        }
        return 0;

error:
        dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
                type == PCI_CAP_ID_MSI ? "" : "-X", irq);
        return irq;
}

#ifdef CONFIG_XEN_DOM0
static bool __read_mostly pci_seg_supported = true;

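/*
 * MSI/MSI-X setup in the initial domain: map each vector to a PIRQ on
 * behalf of the device's owning domain (DOMID_SELF if no owner is
 * registered) via PHYSDEVOP_map_pirq. Newer hypervisors take the
 * segment-aware MAP_PIRQ_TYPE_MSI_SEG; on -EINVAL for segment 0 we
 * fall back to the legacy MAP_PIRQ_TYPE_MSI and remember that segments
 * are unsupported.
 */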
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int ret = 0;
        struct msi_desc *msidesc;

        for_each_pci_msi_entry(msidesc, dev) {
                struct physdev_map_pirq map_irq;
                domid_t domid;

                domid = ret = xen_find_device_domain_owner(dev);
                /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
                 * hence check ret value for < 0. */
                if (ret < 0)
                        domid = DOMID_SELF;

                memset(&map_irq, 0, sizeof(map_irq));
                map_irq.domid = domid;
                map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
                map_irq.index = -1;
                map_irq.pirq = -1;
                map_irq.bus = dev->bus->number |
                              (pci_domain_nr(dev->bus) << 16);
                map_irq.devfn = dev->devfn;

                if (type == PCI_CAP_ID_MSI && nvec > 1) {
                        map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
                        map_irq.entry_nr = nvec;
                } else if (type == PCI_CAP_ID_MSIX) {
                        int pos;
                        unsigned long flags;
                        u32 table_offset, bir;

                        pos = dev->msix_cap;
                        pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
                                              &table_offset);
                        bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
                        flags = pci_resource_flags(dev, bir);
                        if (!flags || (flags & IORESOURCE_UNSET))
                                return -EINVAL;

                        map_irq.table_base = pci_resource_start(dev, bir);
                        map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
                }

                ret = -EINVAL;
                if (pci_seg_supported)
                        ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
                                                    &map_irq);
                if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
                        /*
                         * If MAP_PIRQ_TYPE_MULTI_MSI is not available
                         * there's nothing else we can do in this case.
                         * Just set ret > 0 so the driver can retry with
                         * single MSI.
                         */
                        ret = 1;
                        goto out;
                }
                if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
                        map_irq.type = MAP_PIRQ_TYPE_MSI;
                        map_irq.index = -1;
                        map_irq.pirq = -1;
                        map_irq.bus = dev->bus->number;
                        ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
                                                    &map_irq);
                        if (ret != -EINVAL)
                                pci_seg_supported = false;
                }
                if (ret) {
                        dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
                                 ret, domid);
                        goto out;
                }

                ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
                                               (type == PCI_CAP_ID_MSI) ? nvec : 1,
                                               (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
                                               domid);
                if (ret < 0)
                        goto out;
        }
        ret = 0;
out:
        return ret;
}

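/*
 * Restore a device's MSI state (e.g. after save/restore or a device
 * reset), preferring the segment-aware PHYSDEVOP_restore_msi_ext and
 * falling back to the legacy op when the hypervisor lacks it.
 */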
static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
{
        int ret = 0;

        if (pci_seg_supported) {
                struct physdev_pci_device restore_ext;

                restore_ext.seg = pci_domain_nr(dev->bus);
                restore_ext.bus = dev->bus->number;
                restore_ext.devfn = dev->devfn;
                ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
                                        &restore_ext);
                if (ret == -ENOSYS)
                        pci_seg_supported = false;
                WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
        }
        if (!pci_seg_supported) {
                struct physdev_restore_msi restore;

                restore.bus = dev->bus->number;
                restore.devfn = dev->devfn;
                ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
                WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
        }
}
#else /* CONFIG_XEN_DOM0 */
#define xen_initdom_setup_msi_irqs      NULL
#define xen_initdom_restore_msi_irqs    NULL
#endif /* !CONFIG_XEN_DOM0 */

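/*
 * Common teardown: destroy every Linux IRQ that was bound for the
 * device's MSI descriptors. The PV variant below additionally tells
 * the pcifront backend to disable MSI(-X) on the device first.
 */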
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *msidesc;
        int i;

        for_each_pci_msi_entry(msidesc, dev) {
                if (msidesc->irq) {
                        for (i = 0; i < msidesc->nvec_used; i++)
                                xen_destroy_irq(msidesc->irq + i);
                }
        }
}

static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *msidesc = first_pci_msi_entry(dev);

        if (msidesc->msi_attrib.is_msix)
                xen_pci_frontend_disable_msix(dev);
        else
                xen_pci_frontend_disable_msi(dev);

        xen_teardown_msi_irqs(dev);
}

static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
                                     struct device *dev, int nvec)
{
        int type;

        if (WARN_ON_ONCE(!dev_is_pci(dev)))
                return -EINVAL;

        if (first_msi_entry(dev)->msi_attrib.is_msix)
                type = PCI_CAP_ID_MSIX;
        else
                type = PCI_CAP_ID_MSI;

        return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}

static void xen_msi_domain_free_irqs(struct irq_domain *domain,
                                     struct device *dev)
{
        if (WARN_ON_ONCE(!dev_is_pci(dev)))
                return;

        xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
}

static struct msi_domain_ops xen_pci_msi_domain_ops = {
        .domain_alloc_irqs      = xen_msi_domain_alloc_irqs,
        .domain_free_irqs       = xen_msi_domain_free_irqs,
};

static struct msi_domain_info xen_pci_msi_domain_info = {
        .ops                    = &xen_pci_msi_domain_ops,
};

/*
 * This irq domain is a blatant violation of the irq domain design, but
 * disentangling XEN into real irq domains is not a job for mere mortals
 * with limited XENology. But it's the least dangerous way for a mere
 * mortal to get rid of the arch_*_msi_irqs() hackery in order to store
 * the irq domain pointer in struct device. This irq domain wrapper
 * allows us to do that without breaking XEN terminally.
 */
static __init struct irq_domain *xen_create_pci_msi_domain(void)
{
        struct irq_domain *d = NULL;
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_fwnode("XEN-MSI");
        if (fn)
                d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);

        /* FIXME: No idea how to survive if this fails */
        BUG_ON(!d);

        return d;
}

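/*
 * Pick the MSI setup/teardown ops for the domain type: PV dom0 maps
 * MSIs itself via physdev ops, PV domU goes through pcifront, and HVM
 * allocates PIRQs directly from the hypervisor. All variants then
 * share the wrapper irq domain installed below.
 */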
static __init void xen_setup_pci_msi(void)
{
        if (xen_pv_domain()) {
                if (xen_initial_domain()) {
                        xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
                        x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
                } else {
                        xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
                }
                xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
                pci_msi_ignore_mask = 1;
        } else if (xen_hvm_domain()) {
                xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
                xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
        } else {
                WARN_ON_ONCE(1);
                return;
        }

        /*
         * Override the PCI/MSI irq domain init function. There is no
         * point in allocating the native domain only to never use it.
         */
        x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
}

#else /* CONFIG_PCI_MSI */
static inline void xen_setup_pci_msi(void) { }
#endif /* CONFIG_PCI_MSI */

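/*
 * Entry point for a PV domU, invoked from the x86 PCI init code: route
 * IRQ enabling through pcifront and keep ACPI out of the way (PV
 * guests have no ACPI-provided GSIs).
 */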
int __init pci_xen_init(void)
{
        if (!xen_pv_domain() || xen_initial_domain())
                return -ENODEV;

        printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

        pcibios_set_cache_line_size();

        pcibios_enable_irq = xen_pcifront_enable_irq;
        pcibios_disable_irq = NULL;

        /* Keep ACPI out of the picture */
        acpi_noirq_set();

        xen_setup_pci_msi();
        return 0;
}

#ifdef CONFIG_PCI_MSI
static void __init xen_hvm_msi_init(void)
{
        if (!disable_apic) {
                /*
                 * If the hardware supports (x2)APIC virtualization (as
                 * indicated by the hypervisor's CPUID leaf 4) then we don't
                 * need to use pirqs/event channels for MSI handling and can
                 * use regular APIC processing instead.
                 */
                uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);

                if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
                    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
                        return;
        }
        xen_setup_pci_msi();
}
#endif

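/*
 * Entry point for an HVM domain: only take over GSI registration and
 * MSI handling when the hypervisor provides vector callbacks and HVM
 * PIRQ support; otherwise the native paths stay in place.
 */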
int __init pci_xen_hvm_init(void)
{
        if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
                return 0;

#ifdef CONFIG_ACPI
        /*
         * We don't want to change the actual ACPI delivery model,
         * just how GSIs get registered.
         */
        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
        __acpi_unregister_gsi = NULL;
#endif

#ifdef CONFIG_PCI_MSI
        /*
         * We need to wait until after x2apic is initialized
         * before we can set MSI IRQ ops.
         */
        x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
        return 0;
}

#ifdef CONFIG_XEN_DOM0
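/*
 * Entry point for the initial domain: take over GSI registration and
 * MSI setup, pre-register the legacy IRQs, and, when there is no
 * IO-APIC, bind them 1:1 as "xt-pic" PIRQs.
 */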
int __init pci_xen_initial_domain(void)
{
        int irq;

        xen_setup_pci_msi();
        __acpi_register_gsi = acpi_register_gsi_xen;
        __acpi_unregister_gsi = NULL;
        /*
         * Pre-allocate the legacy IRQs. Use NR_IRQS_LEGACY here
         * because we don't have a PIC and thus nr_legacy_irqs() is zero.
         */
        for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
                int trigger, polarity;

                if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
                        continue;

                xen_register_pirq(irq,
                        trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
                        true /* Map GSI to PIRQ */);
        }
        if (nr_ioapics == 0) {
                for (irq = 0; irq < nr_legacy_irqs(); irq++)
                        xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
        }
        return 0;
}

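/*
 * Bookkeeping for device pass-through: a spinlock-protected list
 * recording which domain owns a PCI device. It is consulted by
 * xen_initdom_setup_msi_irqs() above and typically maintained by the
 * Xen PCI backend. Hypothetical usage:
 *
 *      xen_register_device_domain_owner(pdev, domid);
 *      ...
 *      xen_unregister_device_domain_owner(pdev);
 */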
struct xen_device_domain_owner {
        domid_t domain;
        struct pci_dev *dev;
        struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);

static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
        struct xen_device_domain_owner *owner;

        list_for_each_entry(owner, &dev_domain_list, list) {
                if (owner->dev == dev)
                        return owner;
        }
        return NULL;
}

int xen_find_device_domain_owner(struct pci_dev *dev)
{
        struct xen_device_domain_owner *owner;
        int domain = -ENODEV;

        spin_lock(&dev_domain_list_spinlock);
        owner = find_device(dev);
        if (owner)
                domain = owner->domain;
        spin_unlock(&dev_domain_list_spinlock);
        return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
        struct xen_device_domain_owner *owner;

        owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
        if (!owner)
                return -ENOMEM;

        spin_lock(&dev_domain_list_spinlock);
        if (find_device(dev)) {
                spin_unlock(&dev_domain_list_spinlock);
                kfree(owner);
                return -EEXIST;
        }
        owner->domain = domain;
        owner->dev = dev;
        list_add_tail(&owner->list, &dev_domain_list);
        spin_unlock(&dev_domain_list_spinlock);
        return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
        struct xen_device_domain_owner *owner;

        spin_lock(&dev_domain_list_spinlock);
        owner = find_device(dev);
        if (!owner) {
                spin_unlock(&dev_domain_list_spinlock);
                return -ENODEV;
        }
        list_del(&owner->list);
        spin_unlock(&dev_domain_list_spinlock);
        kfree(owner);
        return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif