linux/arch/x86/pci/xen.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook under the pcibios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *           Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *           Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>

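/*
 * INTx setup for a PV DomU: read the PIRQ that the Xen PCI backend stored
 * in PCI_INTERRUPT_LINE and bind it to a Linux IRQ. IRQs in the legacy
 * range are bound exclusively; all others are shareable.
 */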
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
	int rc;
	int share = 1;
	int pirq;
	u8 gsi;

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
			 rc);
		return rc;
	}
	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
	pirq = gsi;

	if (gsi < nr_legacy_irqs())
		share = 0;

	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
			 gsi, pirq, rc);
		return rc;
	}

	dev->irq = rc;
	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
	return 0;
}

#ifdef CONFIG_ACPI
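/*
 * Map a GSI to a PIRQ with PHYSDEVOP_map_pirq and bind the result to a
 * Linux IRQ. When @set_pirq is true the PIRQ is forced to the GSI number
 * (identity mapping); otherwise the hypervisor chooses one. Returns the
 * Linux IRQ, or a negative value on failure.
 */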
static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
	int rc, pirq = -1, irq;
	struct physdev_map_pirq map_irq;
	int shareable = 0;
	char *name;

	irq = xen_irq_from_gsi(gsi);
	if (irq > 0)
		return irq;

	if (set_pirq)
		pirq = gsi;

	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_GSI;
	map_irq.index = gsi;
	map_irq.pirq = pirq;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);
		return -1;
	}

	if (triggering == ACPI_EDGE_SENSITIVE) {
		shareable = 0;
		name = "ioapic-edge";
	} else {
		shareable = 1;
		name = "ioapic-level";
	}

	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
	if (irq < 0)
		goto out;

	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
	return irq;
}

static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	if (!xen_hvm_domain())
		return -1;

	return xen_register_pirq(gsi, trigger,
				 false /* no mapping of GSI to PIRQ */);
}

#ifdef CONFIG_XEN_PV_DOM0
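/*
 * Dom0 GSI registration: identity-map the GSI to a PIRQ and tell the
 * hypervisor about its triggering and polarity via PHYSDEVOP_setup_gsi.
 */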
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
			gsi, triggering, polarity);

	irq = xen_register_pirq(gsi, triggering, true);

	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST)
		printk(KERN_INFO "GSI %d is already set up\n", gsi);
	else if (rc)
		printk(KERN_ERR "Failed to set up GSI %d: %d\n", gsi, rc);

	return irq;
}

static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
	return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif

#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>

struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);

struct xen_msi_ops {
	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
	void (*teardown_msi_irqs)(struct pci_dev *dev);
};

static struct xen_msi_ops xen_msi_ops __ro_after_init;

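/*
 * PV DomU MSI/MSI-X setup: enable the vectors through the Xen PCI
 * frontend, then bind each returned PIRQ to a Linux IRQ. Returning 1 for
 * multi-MSI tells the PCI core to retry with a single vector.
 */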
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;
	i = 0;
	for_each_pci_msi_entry(msidesc, dev) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" :
					       "pcifront-msi",
					       DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return 0;

error:
	if (ret == -ENOSYS)
		dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
	else if (ret)
		dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
	kfree(v);
	return ret;
}

static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
				struct msi_msg *msg)
{
	/*
	 * We set vector == 0 to tell the hypervisor we don't care about
	 * it, but we want a pirq setup instead.  We use the dest_id fields
	 * to pass the pirq that we want.
	 */
	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_hi.destid_8_31 = pirq >> 8;
	msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}

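/*
 * HVM MSI/MSI-X setup: allocate a PIRQ per vector, encode it into the
 * device's MSI message (see xen_msi_compose_msg()), and bind the PIRQ to
 * a Linux IRQ backed by an event channel.
 */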
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, pirq;
	struct msi_desc *msidesc;
	struct msi_msg msg;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(msidesc, dev) {
		pirq = xen_allocate_pirq_msi(dev, msidesc);
		if (pirq < 0) {
			irq = -ENODEV;
			goto error;
		}
		xen_msi_compose_msg(dev, pirq, &msg);
		__pci_write_msi_msg(msidesc, &msg);
		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       DOMID_SELF);
		if (irq < 0)
			goto error;
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return 0;

error:
	dev_err(&dev->dev, "Failed to create MSI%s: %d\n",
		type == PCI_CAP_ID_MSI ? "" : "-X", irq);
	return irq;
}

#ifdef CONFIG_XEN_PV_DOM0
static bool __read_mostly pci_seg_supported = true;

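/*
 * Dom0 MSI/MSI-X setup: map each vector to a PIRQ via PHYSDEVOP_map_pirq
 * on behalf of the domain owning the device (DOMID_SELF if none), falling
 * back to the segment-less MAP_PIRQ_TYPE_MSI interface on hypervisors
 * without PCI segment support.
 */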
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int ret = 0;
	struct msi_desc *msidesc;

	for_each_pci_msi_entry(msidesc, dev) {
		struct physdev_map_pirq map_irq;
		domid_t domid;

		domid = ret = xen_find_device_domain_owner(dev);
		/*
		 * N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
		 * hence check ret value for < 0.
		 */
		if (ret < 0)
			domid = DOMID_SELF;

		memset(&map_irq, 0, sizeof(map_irq));
		map_irq.domid = domid;
		map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
		map_irq.index = -1;
		map_irq.pirq = -1;
		map_irq.bus = dev->bus->number |
			      (pci_domain_nr(dev->bus) << 16);
		map_irq.devfn = dev->devfn;

		if (type == PCI_CAP_ID_MSI && nvec > 1) {
			map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
			map_irq.entry_nr = nvec;
		} else if (type == PCI_CAP_ID_MSIX) {
			int pos;
			unsigned long flags;
			u32 table_offset, bir;

			pos = dev->msix_cap;
			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
					      &table_offset);
			bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
			flags = pci_resource_flags(dev, bir);
			if (!flags || (flags & IORESOURCE_UNSET))
				return -EINVAL;

			map_irq.table_base = pci_resource_start(dev, bir);
			map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
		}

		ret = -EINVAL;
		if (pci_seg_supported)
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
		if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
			/*
			 * If MAP_PIRQ_TYPE_MULTI_MSI is not available
			 * there's nothing else we can do in this case.
			 * Just set ret > 0 so the driver can retry with
			 * a single MSI.
			 */
			ret = 1;
			goto out;
		}
		if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
			map_irq.type = MAP_PIRQ_TYPE_MSI;
			map_irq.index = -1;
			map_irq.pirq = -1;
			map_irq.bus = dev->bus->number;
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
			if (ret != -EINVAL)
				pci_seg_supported = false;
		}
		if (ret) {
			dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
				 ret, domid);
			goto out;
		}

		ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
					       domid);
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	return ret;
}

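/*
 * Restore a device's MSI mappings in the hypervisor (wired up as
 * x86_msi.restore_msi_irqs), preferring the segment-aware
 * PHYSDEVOP_restore_msi_ext and falling back to PHYSDEVOP_restore_msi.
 */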
static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
{
	int ret = 0;

	if (pci_seg_supported) {
		struct physdev_pci_device restore_ext;

		restore_ext.seg = pci_domain_nr(dev->bus);
		restore_ext.bus = dev->bus->number;
		restore_ext.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
					&restore_ext);
		if (ret == -ENOSYS)
			pci_seg_supported = false;
		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
	}
	if (!pci_seg_supported) {
		struct physdev_restore_msi restore;

		restore.bus = dev->bus->number;
		restore.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
	}
}
#else /* CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs	NULL
#define xen_initdom_restore_msi_irqs	NULL
#endif /* !CONFIG_XEN_PV_DOM0 */

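/* Destroy the Xen IRQs backing every vector of every MSI descriptor. */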
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *msidesc;
	int i;

	for_each_pci_msi_entry(msidesc, dev) {
		if (msidesc->irq) {
			for (i = 0; i < msidesc->nvec_used; i++)
				xen_destroy_irq(msidesc->irq + i);
		}
	}
}

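/*
 * PV teardown: have the frontend disable MSI/MSI-X on the device before
 * destroying the IRQs.
 */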
static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *msidesc = first_pci_msi_entry(dev);

	if (msidesc->msi_attrib.is_msix)
		xen_pci_frontend_disable_msix(dev);
	else
		xen_pci_frontend_disable_msi(dev);

	xen_teardown_msi_irqs(dev);
}

static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
				     struct device *dev, int nvec)
{
	int type;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return -EINVAL;

	if (first_msi_entry(dev)->msi_attrib.is_msix)
		type = PCI_CAP_ID_MSIX;
	else
		type = PCI_CAP_ID_MSI;

	return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}

static void xen_msi_domain_free_irqs(struct irq_domain *domain,
				     struct device *dev)
{
	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
}

static struct msi_domain_ops xen_pci_msi_domain_ops = {
	.domain_alloc_irqs	= xen_msi_domain_alloc_irqs,
	.domain_free_irqs	= xen_msi_domain_free_irqs,
};

static struct msi_domain_info xen_pci_msi_domain_info = {
	.ops			= &xen_pci_msi_domain_ops,
};

/*
 * This irq domain is a blatant violation of the irq domain design, but
 * disentangling XEN into real irq domains is not a job for mere mortals
 * with limited XENology. But it's the least dangerous way for a mere
 * mortal to get rid of the arch_*_msi_irqs() hackery in order to store
 * the irq domain pointer in struct device. This irq domain wrappery
 * allows us to do that without breaking XEN terminally.
 */
static __init struct irq_domain *xen_create_pci_msi_domain(void)
{
	struct irq_domain *d = NULL;
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("XEN-MSI");
	if (fn)
		d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);

	/* FIXME: No idea how to survive if this fails */
	BUG_ON(!d);

	return d;
}

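/*
 * Pick the MSI setup/teardown implementations for the mode we run in
 * (PV Dom0, PV DomU or HVM) and make PCI/MSI domain creation use the
 * Xen wrapper domain above.
 */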
static __init void xen_setup_pci_msi(void)
{
	if (xen_pv_domain()) {
		if (xen_initial_domain()) {
			xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
			x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
		} else {
			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
		}
		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
		pci_msi_ignore_mask = 1;
	} else if (xen_hvm_domain()) {
		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	/*
	 * Override the PCI/MSI irq domain init function. There is no
	 * point in allocating the native domain and never using it.
	 */
	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
}

#else /* CONFIG_PCI_MSI */
static inline void xen_setup_pci_msi(void) { }
#endif /* CONFIG_PCI_MSI */

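/*
 * PV DomU initialization: route INTx setup through pcifront, keep ACPI
 * IRQ routing out of the picture, and install the Xen MSI ops.
 */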
int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

	/* Keep ACPI out of the picture */
	acpi_noirq_set();

	xen_setup_pci_msi();
	return 0;
}

#ifdef CONFIG_PCI_MSI
static void __init xen_hvm_msi_init(void)
{
	if (!disable_apic) {
		/*
		 * If the hardware supports (x2)APIC virtualization (as
		 * indicated by the hypervisor's CPUID leaf 4), we don't need
		 * pirqs/event channels for MSI handling and can use regular
		 * APIC processing instead.
		 */
		uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);

		if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
		    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
			return;
	}
	xen_setup_pci_msi();
}
#endif

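/*
 * HVM initialization: only take over GSI registration and MSI handling
 * when the vector callback and HVM PIRQs are available.
 */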
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
	__acpi_unregister_gsi = NULL;
#endif

#ifdef CONFIG_PCI_MSI
	/*
	 * We need to wait until after x2apic is initialized
	 * before we can set MSI IRQ ops.
	 */
	x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
	return 0;
}

#ifdef CONFIG_XEN_PV_DOM0
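/*
 * Dom0 initialization: take over GSI registration and MSI handling, and
 * pre-register the legacy IRQs.
 */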
int __init pci_xen_initial_domain(void)
{
	int irq;

	xen_setup_pci_msi();
	__acpi_register_gsi = acpi_register_gsi_xen;
	__acpi_unregister_gsi = NULL;
	/*
	 * Pre-allocate the legacy IRQs. Use NR_IRQS_LEGACY here
	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
	 */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		int trigger, polarity;

		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
			continue;

		xen_register_pirq(irq,
			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
			true /* Map GSI to PIRQ */);
	}
	if (nr_ioapics == 0) {
		for (irq = 0; irq < nr_legacy_irqs(); irq++)
			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
	}
	return 0;
}
#endif

#ifdef CONFIG_XEN_DOM0

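/*
 * Bookkeeping of which domain owns a passed-through PCI device,
 * consulted by xen_initdom_setup_msi_irqs() when mapping PIRQs.
 */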
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);

static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

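/* Return the domid owning @dev, or -ENODEV if no owner is registered. */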
int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif /* CONFIG_XEN_DOM0 */