linux/drivers/gpu/drm/drm_pci.c
/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drm_pci.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * FIXME: This is a needless abstraction of the Linux dma-api and should be
 * removed.
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
        drm_dma_handle_t *dmah;
        unsigned long addr;
        size_t sz;

        /* pci_alloc_consistent only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested size.
         * Return NULL here for now to make sure nobody tries for larger alignment
         */
        if (align > size)
                return NULL;

        dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
        if (!dmah)
                return NULL;

        dmah->size = size;
        dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);

        if (dmah->vaddr == NULL) {
                kfree(dmah);
                return NULL;
        }

        memset(dmah->vaddr, 0, size);

        /* XXX - Is virt_to_page() legal for consistent mem? */
        /* Reserve */
        for (addr = (unsigned long)dmah->vaddr, sz = size;
             sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                SetPageReserved(virt_to_page((void *)addr));
        }

        return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
        unsigned long addr;
        size_t sz;

        if (dmah->vaddr) {
                /* XXX - Is virt_to_page() legal for consistent mem? */
                /* Unreserve */
                for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
                     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                        ClearPageReserved(virt_to_page((void *)addr));
                }
                dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
                                  dmah->busaddr);
        }
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 *
 * FIXME: This is a needless abstraction of the Linux dma-api and should be
 * removed.
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
        __drm_legacy_pci_free(dev, dmah);
        kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
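
/*
 * Usage sketch: a caller that needs a small DMA-coherent buffer pairs
 * drm_pci_alloc() with drm_pci_free(). The helper my_drv_program_ring()
 * below is hypothetical and only illustrates consuming the bus address.
 *
 *      drm_dma_handle_t *dmah;
 *
 *      dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *      if (!dmah)
 *              return -ENOMEM;
 *
 *      my_drv_program_ring(dev, dmah->busaddr);  // device-visible DMA address
 *      ((u32 *)dmah->vaddr)[0] = 0xdeadbeef;     // CPU-visible mapping
 *
 *      drm_pci_free(dev, dmah);
 */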

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
        /* For historical reasons, drm_get_pci_domain() is busticated
         * on most archs and has to remain so for userspace interface
         * < 1.4, except on alpha which was right from the beginning
         */
        if (dev->if_version < 0x10004)
                return 0;
#endif /* __alpha__ */

        return pci_domain_nr(dev->pdev->bus);
}

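/*
 * drm_pci_set_busid() derives the master's unique name from the device's
 * PCI address in "pci:%04x:%02x:%02x.%d" (domain:bus:slot.function) form,
 * for example "pci:0000:01:00.0".
 */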
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
        master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
                                        drm_get_pci_domain(dev),
                                        dev->pdev->bus->number,
                                        PCI_SLOT(dev->pdev->devfn),
                                        PCI_FUNC(dev->pdev->devfn));
        if (!master->unique)
                return -ENOMEM;

        master->unique_len = strlen(master->unique);
        return 0;
}

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
        return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EINVAL;

        /* UMS was only ever supported on PCI devices. */
        if (WARN_ON(!dev->pdev))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
                        dev->agp = drm_agp_init(dev);
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
                                dev->agp->agp_info.aper_size *
                                1024 * 1024);
                }
        }
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_legacy_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
}

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempt to get inter-module "drm" information. If we are first,
 * then register the character device and inter-module information.
 * Try to register; if we fail to register, back out previous work.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
 * drm_dev_register() instead and remove your &drm_driver.load callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                    struct drm_driver *driver)
{
        struct drm_device *dev;
        int ret;

        DRM_DEBUG("\n");

        dev = drm_dev_alloc(driver, &pdev->dev);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        ret = pci_enable_device(pdev);
        if (ret)
                goto err_free;

        dev->pdev = pdev;
#ifdef __alpha__
        dev->hose = pdev->sysdata;
#endif

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);

        drm_pci_agp_init(dev);

        ret = drm_dev_register(dev, ent->driver_data);
        if (ret)
                goto err_agp;

        /* No locking needed since shadow-attach is single-threaded, as it may
         * only be called from the per-driver module init hook. */
        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

        return 0;

err_agp:
        drm_pci_agp_destroy(dev);
        pci_disable_device(pdev);
err_free:
        drm_dev_put(dev);
        return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
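
/*
 * Usage sketch: a driver that still relies on this deprecated helper
 * typically calls it from its pci_driver .probe callback, forwarding the
 * matched ID table entry. The my_drv_* names are hypothetical.
 *
 *      static int my_drv_pci_probe(struct pci_dev *pdev,
 *                                  const struct pci_device_id *ent)
 *      {
 *              return drm_get_pci_dev(pdev, ent, &my_drv_driver);
 *      }
 */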

/**
 * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * This is only used by legacy dri1 drivers and is deprecated.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *pid;
        int i;

        DRM_DEBUG("\n");

        if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
                return -EINVAL;

        /* If not using KMS, fall back to stealth mode manual scanning. */
        INIT_LIST_HEAD(&driver->legacy_dev_list);
        for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
                pid = &pdriver->id_table[i];

                /* Loop around setting up a DRM device for each PCI device
                 * matching our ID and device class.  If we had the internal
                 * function that pci_get_subsys and pci_get_class used, we'd
                 * be able to just pass pid in instead of doing a two-stage
                 * thing.
                 */
                pdev = NULL;
                while ((pdev =
                        pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
                                       pid->subdevice, pdev)) != NULL) {
                        if ((pdev->class & pid->class_mask) != pid->class)
                                continue;

                        /* stealth mode requires a manual probe */
                        pci_dev_get(pdev);
                        drm_get_pci_dev(pdev, pid, driver);
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);
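
/*
 * Usage sketch: a dri1 driver shadow-attaches from its module init/exit
 * hooks instead of registering the pci_driver directly, pairing this with
 * drm_legacy_pci_exit(). The my_drv_* names are hypothetical.
 *
 *      static int __init my_drv_init(void)
 *      {
 *              return drm_legacy_pci_init(&my_drv_driver, &my_drv_pci_driver);
 *      }
 *
 *      static void __exit my_drv_exit(void)
 *      {
 *              drm_legacy_pci_exit(&my_drv_driver, &my_drv_pci_driver);
 *      }
 *
 *      module_init(my_drv_init);
 *      module_exit(my_drv_exit);
 */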

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
        struct pci_dev *root;
        u32 lnkcap, lnkcap2;

        *mask = 0;
        if (!dev->pdev)
                return -EINVAL;

        root = dev->pdev->bus->self;

        /* we've been informed that VIA and ServerWorks bridges don't make the cut */
        if (root->vendor == PCI_VENDOR_ID_VIA ||
            root->vendor == PCI_VENDOR_ID_SERVERWORKS)
                return -EINVAL;

        pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
        pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

        if (lnkcap2) {  /* PCIe r3.0-compliant */
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
                        *mask |= DRM_PCIE_SPEED_50;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
                        *mask |= DRM_PCIE_SPEED_80;
        } else {        /* pre-r3.0 */
                if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
                        *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
        }

        DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
        return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
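
/*
 * Usage sketch: a caller deciding whether the upstream link supports a
 * higher generation queries the mask and tests the DRM_PCIE_SPEED_* bits;
 * the gen3_capable flag below is hypothetical.
 *
 *      u32 mask;
 *      bool gen3_capable = false;
 *
 *      if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0)
 *              gen3_capable = !!(mask & DRM_PCIE_SPEED_80);
 */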

int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
{
        struct pci_dev *root;
        u32 lnkcap;

        *mlw = 0;
        if (!dev->pdev)
                return -EINVAL;

        root = dev->pdev->bus->self;

        pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);

        *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

        DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
        return 0;
}
EXPORT_SYMBOL(drm_pcie_get_max_link_width);

#else

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        return -EINVAL;
}
#endif

/**
 * drm_legacy_pci_exit - unregister shadow-attached legacy DRM driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
 * is deprecated and only used by dri1 drivers.
 */
void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
        struct drm_device *dev, *tmp;
        DRM_DEBUG("\n");

        if (!(driver->driver_features & DRIVER_LEGACY)) {
                WARN_ON(1);
        } else {
                list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
                                         legacy_dev_list) {
                        list_del(&dev->legacy_dev_list);
                        drm_put_dev(dev);
                }
        }
        DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);