linux/drivers/video/fbdev/via/via-core.c
/*
 * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 */

/*
 * Core code for the Via multifunction framebuffer device.
 */
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
#include "global.h"

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <asm/olpc.h>

/*
 * The default port config.
 */
static struct via_port_cfg adap_configs[] = {
        [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x26 },
        [VIA_PORT_31]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x31 },
        [VIA_PORT_25]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
        [VIA_PORT_2C]   = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
        [VIA_PORT_3D]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
        { 0, 0, 0, 0 }
};

/*
 * The OLPC XO-1.5 puts the camera power and reset lines onto
 * GPIO 2C.
 */
static struct via_port_cfg olpc_adap_configs[] = {
        [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x26 },
        [VIA_PORT_31]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x31 },
        [VIA_PORT_25]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
        [VIA_PORT_2C]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x2c },
        [VIA_PORT_3D]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
        { 0, 0, 0, 0 }
};

/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.
 */
static struct viafb_dev global_dev;


/*
 * Basic register access; spinlock required.
 */
static inline void viafb_mmio_write(int reg, u32 v)
{
        iowrite32(v, global_dev.engine_mmio + reg);
}

static inline int viafb_mmio_read(int reg)
{
        return ioread32(global_dev.engine_mmio + reg);
}
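
/*
 * Illustrative sketch, not used anywhere in this driver: code touching
 * the engine registers is expected to hold reg_lock around the
 * accessors above, along these lines.
 */
#if 0
static void viafb_example_clear_interrupts(void)
{
        unsigned long flags;

        spin_lock_irqsave(&global_dev.reg_lock, flags);
        viafb_mmio_write(VDE_INTERRUPT, 0);
        spin_unlock_irqrestore(&global_dev.reg_lock, flags);
}
#endif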

/* ---------------------------------------------------------------------- */
/*
 * Interrupt management.  We have a single IRQ line for a lot of
 * different functions, so we need to share it.  The design here
 * is that we don't want to reimplement the shared IRQ code here;
 * we also want to avoid having contention for a single handler thread.
 * So each subdev driver which needs interrupts just requests
 * them directly from the kernel.  We just have what's needed for
 * overall access to the interrupt control register.
 */

/*
 * Which interrupts are enabled now?
 */
static u32 viafb_enabled_ints;

static void viafb_int_init(void)
{
        viafb_enabled_ints = 0;

        viafb_mmio_write(VDE_INTERRUPT, 0);
}

/*
 * Allow subdevs to ask for specific interrupts to be enabled.  These
 * functions must be called with reg_lock held
 */
void viafb_irq_enable(u32 mask)
{
        viafb_enabled_ints |= mask;
        viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);

void viafb_irq_disable(u32 mask)
{
        viafb_enabled_ints &= ~mask;
        if (viafb_enabled_ints == 0)
                viafb_mmio_write(VDE_INTERRUPT, 0);  /* Disable entirely */
        else
                viafb_mmio_write(VDE_INTERRUPT,
                                viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_disable);
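
/*
 * Illustrative sketch only: a subdev driver that needs interrupts
 * requests the (shared) IRQ itself, then turns on its enable bit with
 * viafb_irq_enable() under reg_lock.  "mydev", mydev_irq_handler and
 * VDE_I_MYDEV_EN are placeholders for the subdev's own data, handler
 * and enable bit; compare viafb_request_dma() below.
 */
#if 0
static int mydev_setup_irq(struct viafb_dev *vdev, struct mydev *mydev)
{
        unsigned long flags;
        int ret;

        ret = request_irq(vdev->pdev->irq, mydev_irq_handler, IRQF_SHARED,
                          "viafb-mydev", mydev);
        if (ret)
                return ret;
        spin_lock_irqsave(&vdev->reg_lock, flags);
        viafb_irq_enable(VDE_I_MYDEV_EN);
        spin_unlock_irqrestore(&vdev->reg_lock, flags);
        return 0;
}
#endif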

/* ---------------------------------------------------------------------- */
/*
 * Currently, the camera driver is the only user of the DMA code, so we
 * only compile it in if the camera driver is being built.  Chances are,
 * most viafb systems will not need to have this extra code for a while.
 * As soon as another user comes along, the ifdef can be removed.
 */
#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
/*
 * Access to the DMA engine.  This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);

/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.
 */
struct viafb_vx855_dma_descr {
        u32     addr_low;       /* Low part of phys addr */
        u32     addr_high;      /* High 12 bits of addr */
        u32     fb_offset;      /* Offset into FB memory */
        u32     seg_size;       /* Size, 16-byte units */
        u32     tile_mode;      /* "tile mode" setting */
        u32     next_desc_low;  /* Next descriptor addr */
        u32     next_desc_high;
        u32     pad;            /* Fill out to 32 bytes */
};

/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC         0x01  /* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT 0x02  /* Final segment */
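
/*
 * Illustrative sketch only (it mirrors the loop in viafb_dma_copy_out_sg()
 * below): this is how a single, final descriptor describing one
 * contiguous segment would be filled in.
 */
#if 0
static void viafb_example_fill_descr(struct viafb_vx855_dma_descr *descr,
                                     dma_addr_t paddr, unsigned int fb_offset,
                                     int len)
{
        descr->addr_low = paddr & 0xfffffff0;
        descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
        descr->fb_offset = fb_offset;
        descr->seg_size = len >> 4;     /* 16-byte units */
        descr->tile_mode = 0;
        /* Last descriptor in the chain, so no real "next" pointer */
        descr->next_desc_low = VIAFB_DMA_FINAL_SEGMENT | VIAFB_DMA_MAGIC;
        descr->next_desc_high = 0;
        descr->pad = 0xffffffff;        /* VIA driver does this */
}
#endif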

/*
 * The completion IRQ handler.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
        int csr;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&global_dev.reg_lock);
        csr = viafb_mmio_read(VDMA_CSR0);
        if (csr & VDMA_C_DONE) {
                viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
                complete(&viafb_dma_completion);
                ret = IRQ_HANDLED;
        }
        spin_unlock(&global_dev.reg_lock);
        return ret;
}

/*
 * Indicate a need for DMA functionality.
 */
int viafb_request_dma(void)
{
        int ret = 0;

        /*
         * Only VX855 is supported currently.
         */
        if (global_dev.chip_type != UNICHROME_VX855)
                return -ENODEV;
        /*
         * Note the new user and set up our interrupt handler
         * if need be.
         */
        mutex_lock(&viafb_dma_lock);
        viafb_dma_users++;
        if (viafb_dma_users == 1) {
                ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
                                IRQF_SHARED, "via-dma", &viafb_dma_users);
                if (ret)
                        viafb_dma_users--;
                else
                        viafb_irq_enable(VDE_I_DMA0TDEN);
        }
        mutex_unlock(&viafb_dma_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(viafb_request_dma);

void viafb_release_dma(void)
{
        mutex_lock(&viafb_dma_lock);
        viafb_dma_users--;
        if (viafb_dma_users == 0) {
                viafb_irq_disable(VDE_I_DMA0TDEN);
                free_irq(global_dev.pdev->irq, &viafb_dma_users);
        }
        mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_release_dma);


#if 0
/*
 * Copy a single buffer from FB memory, synchronously.  This code works
 * but is not currently used.
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
        unsigned long flags;
        int csr;

        mutex_lock(&viafb_dma_lock);
        init_completion(&viafb_dma_completion);
        /*
         * Program the controller.
         */
        spin_lock_irqsave(&global_dev.reg_lock, flags);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
        /* Enable ints; must happen after CSR0 write! */
        viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
        viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
        viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
        /* Data sheet suggests DAR0 should be <<4, but it lies */
        viafb_mmio_write(VDMA_DAR0, offset);
        viafb_mmio_write(VDMA_DQWCR0, len >> 4);
        viafb_mmio_write(VDMA_TMR0, 0);
        viafb_mmio_write(VDMA_DPRL0, 0);
        viafb_mmio_write(VDMA_DPRH0, 0);
        viafb_mmio_write(VDMA_PMR0, 0);
        csr = viafb_mmio_read(VDMA_CSR0);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
        spin_unlock_irqrestore(&global_dev.reg_lock, flags);
        /*
         * Now we just wait until the interrupt handler says
         * we're done.
         */
        wait_for_completion_interruptible(&viafb_dma_completion);
        viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
        mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
#endif

/*
 * Do a scatter/gather DMA copy from FB memory.  You must have done
 * a successful call to viafb_request_dma() first.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
        struct viafb_vx855_dma_descr *descr;
        void *descrpages;
        dma_addr_t descr_handle;
        unsigned long flags;
        int i;
        struct scatterlist *sgentry;
        dma_addr_t nextdesc;

        /*
         * Get a place to put the descriptors.
         */
        descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
                        nsg*sizeof(struct viafb_vx855_dma_descr),
                        &descr_handle, GFP_KERNEL);
        if (descrpages == NULL) {
                dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
                return -ENOMEM;
        }
        mutex_lock(&viafb_dma_lock);
        /*
         * Fill them in.
         */
        descr = descrpages;
        nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
        for_each_sg(sg, sgentry, nsg, i) {
                dma_addr_t paddr = sg_dma_address(sgentry);
                descr->addr_low = paddr & 0xfffffff0;
                descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
                descr->fb_offset = offset;
                descr->seg_size = sg_dma_len(sgentry) >> 4;
                descr->tile_mode = 0;
                descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
                descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
                descr->pad = 0xffffffff;  /* VIA driver does this */
                offset += sg_dma_len(sgentry);
                nextdesc += sizeof(struct viafb_vx855_dma_descr);
                descr++;
        }
        descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
        /*
         * Program the engine.
         */
        spin_lock_irqsave(&global_dev.reg_lock, flags);
        init_completion(&viafb_dma_completion);
        viafb_mmio_write(VDMA_DQWCR0, 0);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
        viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
        viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
        viafb_mmio_write(VDMA_DPRH0,
                        (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
        (void) viafb_mmio_read(VDMA_CSR0);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
        spin_unlock_irqrestore(&global_dev.reg_lock, flags);
        /*
         * Now we just wait until the interrupt handler says
         * we're done.  Except that, actually, we need to wait a little
         * longer: the interrupts seem to jump the gun a little and we
         * get corrupted frames sometimes.
         */
        wait_for_completion_timeout(&viafb_dma_completion, 1);
        msleep(1);
        if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
                printk(KERN_ERR "VIA DMA timeout!\n");
        /*
         * Clean up and we're done.
         */
        viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
        viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
        mutex_unlock(&viafb_dma_lock);
        dma_free_coherent(&global_dev.pdev->dev,
                        nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
                        descr_handle);
        return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
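
/*
 * Illustrative sketch only: a user of this interface (the camera driver
 * is the intended one) would bracket its transfers with
 * viafb_request_dma()/viafb_release_dma() and map its buffer for DMA
 * first.  The scatterlist is assumed to have been built by the caller;
 * "fb_offset" is the offset of the frame in framebuffer memory.
 */
#if 0
static int example_capture_frame(struct viafb_dev *vdev,
                                 unsigned int fb_offset,
                                 struct scatterlist *sg, int nsg)
{
        int count, ret;

        ret = viafb_request_dma();
        if (ret)
                return ret;
        count = dma_map_sg(&vdev->pdev->dev, sg, nsg, DMA_FROM_DEVICE);
        if (!count) {
                viafb_release_dma();
                return -EIO;
        }
        ret = viafb_dma_copy_out_sg(fb_offset, sg, count);
        dma_unmap_sg(&vdev->pdev->dev, sg, nsg, DMA_FROM_DEVICE);
        viafb_release_dma();
        return ret;
}
#endif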
#endif /* CONFIG_VIDEO_VIA_CAMERA */

/* ---------------------------------------------------------------------- */
/*
 * Figure out how big our framebuffer memory is.  Kind of ugly,
 * but evidently we can't trust the information found in the
 * fbdev configuration area.
 */
static u16 via_function3[] = {
        CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
        CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
        P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};

/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset */
static int viafb_get_fb_size_from_pci(int chip_type)
{
        int i;
        u8 offset = 0;
        u32 FBSize;
        u32 VideoMemSize;

        /* search for the "FUNCTION3" device in this chipset */
        for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
                struct pci_dev *pdev;

                pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
                                      NULL);
                if (!pdev)
                        continue;

                DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);

                switch (pdev->device) {
                case CLE266_FUNCTION3:
                case KM400_FUNCTION3:
                        offset = 0xE0;
                        break;
                case CN400_FUNCTION3:
                case CN700_FUNCTION3:
                case CX700_FUNCTION3:
                case KM800_FUNCTION3:
                case KM890_FUNCTION3:
                case P4M890_FUNCTION3:
                case P4M900_FUNCTION3:
                case VX800_FUNCTION3:
                case VX855_FUNCTION3:
                case VX900_FUNCTION3:
                /*case CN750_FUNCTION3: */
                        offset = 0xA0;
                        break;
                }

                if (!offset)
                        break;

                pci_read_config_dword(pdev, offset, &FBSize);
                pci_dev_put(pdev);
        }

        if (!offset) {
                printk(KERN_ERR "cannot determine framebuffer size\n");
                return -EIO;
        }

        FBSize = FBSize & 0x00007000;
        DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);

        if (chip_type < UNICHROME_CX700) {
                switch (FBSize) {
                case 0x00004000:
                        VideoMemSize = (16 << 20);      /*16M */
                        break;

                case 0x00005000:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;

                case 0x00006000:
                        VideoMemSize = (64 << 20);      /*64M */
                        break;

                default:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;
                }
        } else {
                switch (FBSize) {
                case 0x00001000:
                        VideoMemSize = (8 << 20);       /*8M */
                        break;

                case 0x00002000:
                        VideoMemSize = (16 << 20);      /*16M */
                        break;

                case 0x00003000:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;

                case 0x00004000:
                        VideoMemSize = (64 << 20);      /*64M */
                        break;

                case 0x00005000:
                        VideoMemSize = (128 << 20);     /*128M */
                        break;

                case 0x00006000:
                        VideoMemSize = (256 << 20);     /*256M */
                        break;

                case 0x00007000:        /* Only on VX855/875 */
                        VideoMemSize = (512 << 20);     /*512M */
                        break;

                default:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;
                }
        }

        return VideoMemSize;
}


/*
 * Figure out and map our MMIO regions.
 */
static int via_pci_setup_mmio(struct viafb_dev *vdev)
{
        int ret;
        /*
         * Hook up to the device registers.  Note that we soldier
         * on if it fails; the framebuffer can operate (without
         * acceleration) without this region.
         */
        vdev->engine_start = pci_resource_start(vdev->pdev, 1);
        vdev->engine_len = pci_resource_len(vdev->pdev, 1);
        vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
                        vdev->engine_len);
        if (vdev->engine_mmio == NULL)
                dev_err(&vdev->pdev->dev,
                                "Unable to map engine MMIO; operation will be "
                                "slow and crippled.\n");
        /*
         * Map in framebuffer memory.  For now, failure here is
         * fatal.  Unfortunately, in the absence of significant
         * vmalloc space, failure here is also entirely plausible.
         * Eventually we want to move away from mapping this
         * entire region.
         */
        if (vdev->chip_type == UNICHROME_VX900)
                vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
        else
                vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
        ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
        if (ret < 0)
                goto out_unmap;

        /* try to map less memory on failure, 8 MB should still be enough */
        for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) {
                vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len);
                if (vdev->fbmem)
                        break;
        }

        if (vdev->fbmem == NULL) {
                ret = -ENOMEM;
                goto out_unmap;
        }
        return 0;
out_unmap:
        iounmap(vdev->engine_mmio);
        return ret;
}

static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
        iounmap(vdev->fbmem);
        iounmap(vdev->engine_mmio);
}

/*
 * Create our subsidiary devices.
 */
static struct viafb_subdev_info {
        char *name;
        struct platform_device *platdev;
} viafb_subdevs[] = {
        {
                .name = "viafb-gpio",
        },
        {
                .name = "viafb-i2c",
        },
#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
        {
                .name = "viafb-camera",
        },
#endif
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)

static int via_create_subdev(struct viafb_dev *vdev,
                             struct viafb_subdev_info *info)
{
        int ret;

        info->platdev = platform_device_alloc(info->name, -1);
        if (!info->platdev) {
                dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
                        info->name);
                return -ENOMEM;
        }
        info->platdev->dev.parent = &vdev->pdev->dev;
        info->platdev->dev.platform_data = vdev;
        ret = platform_device_add(info->platdev);
        if (ret) {
                dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
                                info->name);
                platform_device_put(info->platdev);
                info->platdev = NULL;
        }
        return ret;
}

static int via_setup_subdevs(struct viafb_dev *vdev)
{
        int i;

        /*
         * Ignore return values.  Even if some of the devices
         * fail to be created, we'll still be able to use some
         * of the rest.
         */
        for (i = 0; i < N_SUBDEVS; i++)
                via_create_subdev(vdev, viafb_subdevs + i);
        return 0;
}

static void via_teardown_subdevs(void)
{
        int i;

        for (i = 0; i < N_SUBDEVS; i++)
                if (viafb_subdevs[i].platdev) {
                        viafb_subdevs[i].platdev->dev.platform_data = NULL;
                        platform_device_unregister(viafb_subdevs[i].platdev);
                }
}
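
/*
 * Illustrative sketch only: each of the subdevs above binds as an
 * ordinary platform driver and finds the shared viafb_dev through
 * platform_data, roughly like this.  "viafb-example" and the example_*
 * names are placeholders; see the viafb-gpio and viafb-i2c drivers for
 * the real thing.
 */
#if 0
static int example_probe(struct platform_device *platdev)
{
        struct viafb_dev *vdev = platdev->dev.platform_data;

        /* ... use vdev->engine_mmio, vdev->port_cfg, etc. ... */
        return 0;
}

static struct platform_driver example_driver = {
        .driver = {
                .name = "viafb-example",
        },
        .probe = example_probe,
};
#endif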

/*
 * Power management functions
 */
#ifdef CONFIG_PM
static LIST_HEAD(viafb_pm_hooks);
static DEFINE_MUTEX(viafb_pm_hooks_lock);

void viafb_pm_register(struct viafb_pm_hooks *hooks)
{
        INIT_LIST_HEAD(&hooks->list);

        mutex_lock(&viafb_pm_hooks_lock);
        list_add_tail(&hooks->list, &viafb_pm_hooks);
        mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_register);

void viafb_pm_unregister(struct viafb_pm_hooks *hooks)
{
        mutex_lock(&viafb_pm_hooks_lock);
        list_del(&hooks->list);
        mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_unregister);
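
/*
 * Illustrative sketch only: a subdev that must be quiesced across
 * suspend/resume fills in a viafb_pm_hooks structure and registers it,
 * typically from its probe function.  The example_* names are
 * placeholders; the hooks are invoked with the private pointer, as seen
 * in via_suspend()/via_resume() below.
 */
#if 0
static int example_suspend(void *private)
{
        /* Quiesce the hardware owned by this subdev */
        return 0;
}

static int example_resume(void *private)
{
        /* Reprogram the hardware now that the chip is powered up again */
        return 0;
}

static struct viafb_pm_hooks example_pm_hooks = {
        .suspend = example_suspend,
        .resume = example_resume,
        .private = NULL,        /* the subdev's own state would go here */
};

static void example_register_pm(void)
{
        viafb_pm_register(&example_pm_hooks);
}
#endif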

static int via_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct viafb_pm_hooks *hooks;

        if (state.event != PM_EVENT_SUSPEND)
                return 0;
        /*
         * "I've occasionally hit a few drivers that caused suspend
         * failures, and each and every time it was a driver bug, and
         * the right thing to do was to just ignore the error and suspend
         * anyway - returning an error code and trying to undo the suspend
         * is not what anybody ever really wants, even if our model
         * _allows_ for it."
         * -- Linus Torvalds, Dec. 7, 2009
         */
        mutex_lock(&viafb_pm_hooks_lock);
        list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list)
                hooks->suspend(hooks->private);
        mutex_unlock(&viafb_pm_hooks_lock);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int via_resume(struct pci_dev *pdev)
{
        struct viafb_pm_hooks *hooks;

        /* Get the bus side powered up */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        if (pci_enable_device(pdev))
                return 0;

        pci_set_master(pdev);

        /* Now bring back any subdevs */
        mutex_lock(&viafb_pm_hooks_lock);
        list_for_each_entry(hooks, &viafb_pm_hooks, list)
                hooks->resume(hooks->private);
        mutex_unlock(&viafb_pm_hooks_lock);

        return 0;
}
#endif /* CONFIG_PM */

static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        /*
         * Global device initialization.
         */
        memset(&global_dev, 0, sizeof(global_dev));
        global_dev.pdev = pdev;
        global_dev.chip_type = ent->driver_data;
        global_dev.port_cfg = adap_configs;
        if (machine_is_olpc())
                global_dev.port_cfg = olpc_adap_configs;

        spin_lock_init(&global_dev.reg_lock);
        ret = via_pci_setup_mmio(&global_dev);
        if (ret)
                goto out_disable;
        /*
         * Set up interrupts and create our subdevices.  Continue even if
         * some things fail.
         */
        viafb_int_init();
        via_setup_subdevs(&global_dev);
        /*
         * Set up the framebuffer device
         */
        ret = via_fb_pci_probe(&global_dev);
        if (ret)
                goto out_subdevs;
        return 0;

out_subdevs:
        via_teardown_subdevs();
        via_pci_teardown_mmio(&global_dev);
out_disable:
        pci_disable_device(pdev);
        return ret;
}

static void via_pci_remove(struct pci_dev *pdev)
{
        via_teardown_subdevs();
        via_fb_pci_remove(pdev);
        via_pci_teardown_mmio(&global_dev);
        pci_disable_device(pdev);
}


static struct pci_device_id via_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
          .driver_data = UNICHROME_CLE266 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
          .driver_data = UNICHROME_K400 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
          .driver_data = UNICHROME_K800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
          .driver_data = UNICHROME_PM800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
          .driver_data = UNICHROME_CN700 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
          .driver_data = UNICHROME_CX700 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
          .driver_data = UNICHROME_CN750 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
          .driver_data = UNICHROME_K8M890 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
          .driver_data = UNICHROME_P4M890 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
          .driver_data = UNICHROME_P4M900 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
          .driver_data = UNICHROME_VX800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
          .driver_data = UNICHROME_VX855 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
          .driver_data = UNICHROME_VX900 },
        { }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);

static struct pci_driver via_driver = {
        .name           = "viafb",
        .id_table       = via_pci_table,
        .probe          = via_pci_probe,
        .remove         = via_pci_remove,
#ifdef CONFIG_PM
        .suspend        = via_suspend,
        .resume         = via_resume,
#endif
};

static int __init via_core_init(void)
{
        int ret;

        ret = viafb_init();
        if (ret)
                return ret;
        viafb_i2c_init();
        viafb_gpio_init();
        return pci_register_driver(&via_driver);
}

static void __exit via_core_exit(void)
{
        pci_unregister_driver(&via_driver);
        viafb_gpio_exit();
        viafb_i2c_exit();
        viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);