linux/drivers/gpio/gpio-omap.c
   1/*
   2 * Support functions for OMAP GPIO
   3 *
   4 * Copyright (C) 2003-2005 Nokia Corporation
   5 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
   6 *
   7 * Copyright (C) 2009 Texas Instruments
   8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/init.h>
  16#include <linux/module.h>
  17#include <linux/interrupt.h>
  18#include <linux/syscore_ops.h>
  19#include <linux/err.h>
  20#include <linux/clk.h>
  21#include <linux/io.h>
  22#include <linux/device.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/pm.h>
  25#include <linux/of.h>
  26#include <linux/of_device.h>
  27#include <linux/irqdomain.h>
  28
  29#include <mach/hardware.h>
  30#include <asm/irq.h>
  31#include <mach/irqs.h>
  32#include <asm/gpio.h>
  33#include <asm/mach/irq.h>
  34
  35#define OFF_MODE        1
  36
  37static LIST_HEAD(omap_gpio_list);
  38
  39struct gpio_regs {
  40        u32 irqenable1;
  41        u32 irqenable2;
  42        u32 wake_en;
  43        u32 ctrl;
  44        u32 oe;
  45        u32 leveldetect0;
  46        u32 leveldetect1;
  47        u32 risingdetect;
  48        u32 fallingdetect;
  49        u32 dataout;
  50        u32 debounce;
  51        u32 debounce_en;
  52};
  53
  54struct gpio_bank {
  55        struct list_head node;
  56        void __iomem *base;
  57        u16 irq;
  58        int irq_base;
  59        struct irq_domain *domain;
  60        u32 suspend_wakeup;
  61        u32 saved_wakeup;
  62        u32 non_wakeup_gpios;
  63        u32 enabled_non_wakeup_gpios;
  64        struct gpio_regs context;
  65        u32 saved_datain;
  66        u32 saved_fallingdetect;
  67        u32 saved_risingdetect;
  68        u32 level_mask;
  69        u32 toggle_mask;
  70        spinlock_t lock;
  71        struct gpio_chip chip;
  72        struct clk *dbck;
  73        u32 mod_usage;
  74        u32 dbck_enable_mask;
  75        bool dbck_enabled;
  76        struct device *dev;
  77        bool is_mpuio;
  78        bool dbck_flag;
  79        bool loses_context;
  80        int stride;
  81        u32 width;
  82        int context_loss_count;
  83        int power_mode;
  84        bool workaround_enabled;
  85
  86        void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
  87        int (*get_context_loss_count)(struct device *dev);
  88
  89        struct omap_gpio_reg_offs *regs;
  90};
  91
  92#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
  93#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
  94#define GPIO_MOD_CTRL_BIT       BIT(0)
  95
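/*
 * Translate a Linux IRQ number belonging to this bank into the
 * corresponding Linux GPIO number, using bank->irq_base and the
 * gpio_chip base.
 */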
  96static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
  97{
  98        return gpio_irq - bank->irq_base + bank->chip.base;
  99}
 100
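/*
 * Set the direction of one GPIO line via the bank's OE register
 * (bit set = input) and cache the new value for context restore.
 */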
 101static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
 102{
 103        void __iomem *reg = bank->base;
 104        u32 l;
 105
 106        reg += bank->regs->direction;
 107        l = __raw_readl(reg);
 108        if (is_input)
 109                l |= 1 << gpio;
 110        else
 111                l &= ~(1 << gpio);
 112        __raw_writel(l, reg);
 113        bank->context.oe = l;
 114}
 115
 116
 117/* set data out value using dedicated set/clear register */
 118static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
 119{
 120        void __iomem *reg = bank->base;
 121        u32 l = GPIO_BIT(bank, gpio);
 122
 123        if (enable) {
 124                reg += bank->regs->set_dataout;
 125                bank->context.dataout |= l;
 126        } else {
 127                reg += bank->regs->clr_dataout;
 128                bank->context.dataout &= ~l;
 129        }
 130
 131        __raw_writel(l, reg);
 132}
 133
 134/* set data out value using mask register */
 135static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
 136{
 137        void __iomem *reg = bank->base + bank->regs->dataout;
 138        u32 gpio_bit = GPIO_BIT(bank, gpio);
 139        u32 l;
 140
 141        l = __raw_readl(reg);
 142        if (enable)
 143                l |= gpio_bit;
 144        else
 145                l &= ~gpio_bit;
 146        __raw_writel(l, reg);
 147        bank->context.dataout = l;
 148}
 149
 150static int _get_gpio_datain(struct gpio_bank *bank, int offset)
 151{
 152        void __iomem *reg = bank->base + bank->regs->datain;
 153
 154        return (__raw_readl(reg) & (1 << offset)) != 0;
 155}
 156
 157static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
 158{
 159        void __iomem *reg = bank->base + bank->regs->dataout;
 160
 161        return (__raw_readl(reg) & (1 << offset)) != 0;
 162}
 163
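/* Read-modify-write helper: set or clear @mask in the register at @base + @reg. */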
 164static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
 165{
 166        int l = __raw_readl(base + reg);
 167
 168        if (set)
 169                l |= mask;
 170        else
 171                l &= ~mask;
 172
 173        __raw_writel(l, base + reg);
 174}
 175
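/*
 * The debounce functional clock is only kept enabled while at least one
 * line in the bank has debouncing configured (dbck_enable_mask != 0).
 */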
 176static inline void _gpio_dbck_enable(struct gpio_bank *bank)
 177{
 178        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
 179                clk_enable(bank->dbck);
 180                bank->dbck_enabled = true;
 181        }
 182}
 183
 184static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 185{
 186        if (bank->dbck_enable_mask && bank->dbck_enabled) {
 187                clk_disable(bank->dbck);
 188                bank->dbck_enabled = false;
 189        }
 190}
 191
 192/**
 193 * _set_gpio_debounce - low level gpio debounce time
 194 * @bank: the gpio bank we're acting upon
 195 * @gpio: the gpio number on this @bank
 196 * @debounce: debounce time to use
 197 *
 198 * OMAP's debounce time is in 31us steps so we need
 199 * to convert and round up to the closest unit.
 200 */
 201static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
 202                unsigned debounce)
 203{
 204        void __iomem            *reg;
 205        u32                     val;
 206        u32                     l;
 207
 208        if (!bank->dbck_flag)
 209                return;
 210
 211        if (debounce < 32)
 212                debounce = 0x01;
 213        else if (debounce > 7936)
 214                debounce = 0xff;
 215        else
 216                debounce = (debounce / 0x1f) - 1;
 217
 218        l = GPIO_BIT(bank, gpio);
 219
 220        clk_enable(bank->dbck);
 221        reg = bank->base + bank->regs->debounce;
 222        __raw_writel(debounce, reg);
 223
 224        reg = bank->base + bank->regs->debounce_en;
 225        val = __raw_readl(reg);
 226
 227        if (debounce)
 228                val |= l;
 229        else
 230                val &= ~l;
 231        bank->dbck_enable_mask = val;
 232
 233        __raw_writel(val, reg);
 234        clk_disable(bank->dbck);
 235        /*
 236         * Enable debounce clock per module.
 237         * This call is mandatory because when omap_gpio_request() calls
 238         * *_runtime_get_sync(), _gpio_dbck_enable() within the runtime
 239         * callback fails to turn on dbck because dbck_enable_mask used
 240         * within _gpio_dbck_enable() has not been initialized at that
 241         * point. Therefore we have to enable dbck here.
 242         */
 243        _gpio_dbck_enable(bank);
 244        if (bank->dbck_enable_mask) {
 245                bank->context.debounce = debounce;
 246                bank->context.debounce_en = val;
 247        }
 248}
 249
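/*
 * Program level/edge detection for one GPIO on banks that provide the
 * leveldetect/risingdetect/fallingdetect registers, update the wake-up
 * enable bit for wake-up capable lines and refresh the cached context.
 */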
 250static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
 251                                                unsigned trigger)
 252{
 253        void __iomem *base = bank->base;
 254        u32 gpio_bit = 1 << gpio;
 255
 256        _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
 257                  trigger & IRQ_TYPE_LEVEL_LOW);
 258        _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
 259                  trigger & IRQ_TYPE_LEVEL_HIGH);
 260        _gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
 261                  trigger & IRQ_TYPE_EDGE_RISING);
 262        _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
 263                  trigger & IRQ_TYPE_EDGE_FALLING);
 264
 265        bank->context.leveldetect0 =
 266                        __raw_readl(bank->base + bank->regs->leveldetect0);
 267        bank->context.leveldetect1 =
 268                        __raw_readl(bank->base + bank->regs->leveldetect1);
 269        bank->context.risingdetect =
 270                        __raw_readl(bank->base + bank->regs->risingdetect);
 271        bank->context.fallingdetect =
 272                        __raw_readl(bank->base + bank->regs->fallingdetect);
 273
 274        if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
 275                _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
 276                bank->context.wake_en =
 277                        __raw_readl(bank->base + bank->regs->wkup_en);
 278        }
 279
 280        /* This part must always be executed for OMAP{34xx, 44xx} */
 281        if (!bank->regs->irqctrl) {
 282                /* On omap24xx, proceed only when a valid GPIO bit is set */
 283                if (bank->non_wakeup_gpios) {
 284                        if (!(bank->non_wakeup_gpios & gpio_bit))
 285                                goto exit;
 286                }
 287
 288                /*
 289                 * Log the edge gpio and manually trigger the IRQ
 290                 * after resume if the input level changes,
 291                 * to avoid losing the irq during PER RET/OFF mode.
 292                 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
 293                 */
 294                if (trigger & IRQ_TYPE_EDGE_BOTH)
 295                        bank->enabled_non_wakeup_gpios |= gpio_bit;
 296                else
 297                        bank->enabled_non_wakeup_gpios &= ~gpio_bit;
 298        }
 299
 300exit:
 301        bank->level_mask =
 302                __raw_readl(bank->base + bank->regs->leveldetect0) |
 303                __raw_readl(bank->base + bank->regs->leveldetect1);
 304}
 305
 306#ifdef CONFIG_ARCH_OMAP1
 307/*
 308 * This only applies to chips that can't do both rising and falling edge
 309 * detection at once.  For all other chips, this function is a noop.
 310 */
 311static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
 312{
 313        void __iomem *reg = bank->base;
 314        u32 l = 0;
 315
 316        if (!bank->regs->irqctrl)
 317                return;
 318
 319        reg += bank->regs->irqctrl;
 320
 321        l = __raw_readl(reg);
 322        if ((l >> gpio) & 1)
 323                l &= ~(1 << gpio);
 324        else
 325                l |= 1 << gpio;
 326
 327        __raw_writel(l, reg);
 328}
 329#else
 330static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
 331#endif
 332
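/*
 * Configure triggering for one GPIO, covering the three register layouts:
 * banks with leveldetect/wkup_en registers, banks with a single ICR-style
 * irqctrl register, and banks with split edgectrl1/edgectrl2 registers.
 */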
 333static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
 334                                                        unsigned trigger)
 335{
 336        void __iomem *reg = bank->base;
 337        void __iomem *base = bank->base;
 338        u32 l = 0;
 339
 340        if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
 341                set_gpio_trigger(bank, gpio, trigger);
 342        } else if (bank->regs->irqctrl) {
 343                reg += bank->regs->irqctrl;
 344
 345                l = __raw_readl(reg);
 346                if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
 347                        bank->toggle_mask |= 1 << gpio;
 348                if (trigger & IRQ_TYPE_EDGE_RISING)
 349                        l |= 1 << gpio;
 350                else if (trigger & IRQ_TYPE_EDGE_FALLING)
 351                        l &= ~(1 << gpio);
 352                else
 353                        return -EINVAL;
 354
 355                __raw_writel(l, reg);
 356        } else if (bank->regs->edgectrl1) {
 357                if (gpio & 0x08)
 358                        reg += bank->regs->edgectrl2;
 359                else
 360                        reg += bank->regs->edgectrl1;
 361
 362                gpio &= 0x07;
 363                l = __raw_readl(reg);
 364                l &= ~(3 << (gpio << 1));
 365                if (trigger & IRQ_TYPE_EDGE_RISING)
 366                        l |= 2 << (gpio << 1);
 367                if (trigger & IRQ_TYPE_EDGE_FALLING)
 368                        l |= 1 << (gpio << 1);
 369
 370                /* Enable wake-up during idle for dynamic tick */
 371                _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
 372                bank->context.wake_en =
 373                        __raw_readl(bank->base + bank->regs->wkup_en);
 374                __raw_writel(l, reg);
 375        }
 376        return 0;
 377}
 378
 379static int gpio_irq_type(struct irq_data *d, unsigned type)
 380{
 381        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 382        unsigned gpio;
 383        int retval;
 384        unsigned long flags;
 385
 386        if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
 387                gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
 388        else
 389                gpio = irq_to_gpio(bank, d->irq);
 390
 391        if (type & ~IRQ_TYPE_SENSE_MASK)
 392                return -EINVAL;
 393
 394        if (!bank->regs->leveldetect0 &&
 395                (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
 396                return -EINVAL;
 397
 398        spin_lock_irqsave(&bank->lock, flags);
 399        retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
 400        spin_unlock_irqrestore(&bank->lock, flags);
 401
 402        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 403                __irq_set_handler_locked(d->irq, handle_level_irq);
 404        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 405                __irq_set_handler_locked(d->irq, handle_edge_irq);
 406
 407        return retval;
 408}
 409
 410static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 411{
 412        void __iomem *reg = bank->base;
 413
 414        reg += bank->regs->irqstatus;
 415        __raw_writel(gpio_mask, reg);
 416
 417        /* Workaround for clearing DSP GPIO interrupts to allow retention */
 418        if (bank->regs->irqstatus2) {
 419                reg = bank->base + bank->regs->irqstatus2;
 420                __raw_writel(gpio_mask, reg);
 421        }
 422
 423        /* Flush posted write for the irq status to avoid spurious interrupts */
 424        __raw_readl(reg);
 425}
 426
 427static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
 428{
 429        _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 430}
 431
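/* Return the mask of currently enabled interrupt lines in this bank. */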
 432static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
 433{
 434        void __iomem *reg = bank->base;
 435        u32 l;
 436        u32 mask = (1 << bank->width) - 1;
 437
 438        reg += bank->regs->irqenable;
 439        l = __raw_readl(reg);
 440        if (bank->regs->irqenable_inv)
 441                l = ~l;
 442        l &= mask;
 443        return l;
 444}
 445
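/*
 * Enable the interrupts in @gpio_mask, using the dedicated set register
 * where available, otherwise a read-modify-write of the irqenable
 * register (honouring irqenable_inv on banks with inverted semantics).
 */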
 446static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 447{
 448        void __iomem *reg = bank->base;
 449        u32 l;
 450
 451        if (bank->regs->set_irqenable) {
 452                reg += bank->regs->set_irqenable;
 453                l = gpio_mask;
 454                bank->context.irqenable1 |= gpio_mask;
 455        } else {
 456                reg += bank->regs->irqenable;
 457                l = __raw_readl(reg);
 458                if (bank->regs->irqenable_inv)
 459                        l &= ~gpio_mask;
 460                else
 461                        l |= gpio_mask;
 462                bank->context.irqenable1 = l;
 463        }
 464
 465        __raw_writel(l, reg);
 466}
 467
 468static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 469{
 470        void __iomem *reg = bank->base;
 471        u32 l;
 472
 473        if (bank->regs->clr_irqenable) {
 474                reg += bank->regs->clr_irqenable;
 475                l = gpio_mask;
 476                bank->context.irqenable1 &= ~gpio_mask;
 477        } else {
 478                reg += bank->regs->irqenable;
 479                l = __raw_readl(reg);
 480                if (bank->regs->irqenable_inv)
 481                        l |= gpio_mask;
 482                else
 483                        l &= ~gpio_mask;
 484                bank->context.irqenable1 = l;
 485        }
 486
 487        __raw_writel(l, reg);
 488}
 489
 490static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
 491{
 492        if (enable)
 493                _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 494        else
 495                _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 496}
 497
 498/*
 499 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 500 * The 1510 does not seem to have a wake-up register. If JTAG is connected
 501 * to the target, the system will always wake up on GPIO events. While the
 502 * system is running, all registered GPIO interrupts need to have wake-up
 503 * enabled. When the system is suspended, only selected GPIO interrupts need
 504 * to have wake-up enabled.
 505 */
 506static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
 507{
 508        u32 gpio_bit = GPIO_BIT(bank, gpio);
 509        unsigned long flags;
 510
 511        if (bank->non_wakeup_gpios & gpio_bit) {
 512                dev_err(bank->dev,
 513                        "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
 514                return -EINVAL;
 515        }
 516
 517        spin_lock_irqsave(&bank->lock, flags);
 518        if (enable)
 519                bank->suspend_wakeup |= gpio_bit;
 520        else
 521                bank->suspend_wakeup &= ~gpio_bit;
 522
 523        __raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
 524        spin_unlock_irqrestore(&bank->lock, flags);
 525
 526        return 0;
 527}
 528
 529static void _reset_gpio(struct gpio_bank *bank, int gpio)
 530{
 531        _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
 532        _set_gpio_irqenable(bank, gpio, 0);
 533        _clear_gpio_irqstatus(bank, gpio);
 534        _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
 535}
 536
 537/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
 538static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
 539{
 540        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 541        unsigned int gpio = irq_to_gpio(bank, d->irq);
 542
 543        return _set_gpio_wakeup(bank, gpio, enable);
 544}
 545
 546static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
 547{
 548        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 549        unsigned long flags;
 550
 551        /*
 552         * If this is the first gpio_request for the bank,
 553         * enable the bank module.
 554         */
 555        if (!bank->mod_usage)
 556                pm_runtime_get_sync(bank->dev);
 557
 558        spin_lock_irqsave(&bank->lock, flags);
 559        /* Set trigger to none. You need to enable the desired trigger with
 560         * request_irq() or set_irq_type().
 561         */
 562        _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
 563
 564        if (bank->regs->pinctrl) {
 565                void __iomem *reg = bank->base + bank->regs->pinctrl;
 566
 567                /* Claim the pin for MPU */
 568                __raw_writel(__raw_readl(reg) | (1 << offset), reg);
 569        }
 570
 571        if (bank->regs->ctrl && !bank->mod_usage) {
 572                void __iomem *reg = bank->base + bank->regs->ctrl;
 573                u32 ctrl;
 574
 575                ctrl = __raw_readl(reg);
 576                /* Module is enabled, clocks are not gated */
 577                ctrl &= ~GPIO_MOD_CTRL_BIT;
 578                __raw_writel(ctrl, reg);
 579                bank->context.ctrl = ctrl;
 580        }
 581
 582        bank->mod_usage |= 1 << offset;
 583
 584        spin_unlock_irqrestore(&bank->lock, flags);
 585
 586        return 0;
 587}
 588
 589static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 590{
 591        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 592        void __iomem *base = bank->base;
 593        unsigned long flags;
 594
 595        spin_lock_irqsave(&bank->lock, flags);
 596
 597        if (bank->regs->wkup_en) {
 598                /* Disable wake-up during idle for dynamic tick */
 599                _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
 600                bank->context.wake_en =
 601                        __raw_readl(bank->base + bank->regs->wkup_en);
 602        }
 603
 604        bank->mod_usage &= ~(1 << offset);
 605
 606        if (bank->regs->ctrl && !bank->mod_usage) {
 607                void __iomem *reg = bank->base + bank->regs->ctrl;
 608                u32 ctrl;
 609
 610                ctrl = __raw_readl(reg);
 611                /* Module is disabled, clocks are gated */
 612                ctrl |= GPIO_MOD_CTRL_BIT;
 613                __raw_writel(ctrl, reg);
 614                bank->context.ctrl = ctrl;
 615        }
 616
 617        _reset_gpio(bank, bank->chip.base + offset);
 618        spin_unlock_irqrestore(&bank->lock, flags);
 619
 620        /*
 621         * If this is the last gpio to be freed in the bank,
 622         * disable the bank module.
 623         */
 624        if (!bank->mod_usage)
 625                pm_runtime_put(bank->dev);
 626}
 627
 628/*
 629 * We need to unmask the GPIO bank interrupt as soon as possible to
 630 * avoid missing GPIO interrupts for other lines in the bank.
 631 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 632 * in the bank to avoid missing nested interrupts for a GPIO line.
 633 * If we wait to unmask individual GPIO lines in the bank after the
 634 * line's interrupt handler has been run, we may miss some nested
 635 * interrupts.
 636 */
 637static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 638{
 639        void __iomem *isr_reg = NULL;
 640        u32 isr;
 641        unsigned int gpio_irq, gpio_index;
 642        struct gpio_bank *bank;
 643        u32 retrigger = 0;
 644        int unmasked = 0;
 645        struct irq_chip *chip = irq_desc_get_chip(desc);
 646
 647        chained_irq_enter(chip, desc);
 648
 649        bank = irq_get_handler_data(irq);
 650        isr_reg = bank->base + bank->regs->irqstatus;
 651        pm_runtime_get_sync(bank->dev);
 652
 653        if (WARN_ON(!isr_reg))
 654                goto exit;
 655
 656        while (1) {
 657                u32 isr_saved, level_mask = 0;
 658                u32 enabled;
 659
 660                enabled = _get_gpio_irqbank_mask(bank);
 661                isr_saved = isr = __raw_readl(isr_reg) & enabled;
 662
 663                if (bank->level_mask)
 664                        level_mask = bank->level_mask & enabled;
 665
 666                /* clear edge-sensitive interrupts before the handler(s) are
 667                called so that we don't miss any interrupts that occur while
 668                they are executing */
 669                _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
 670                _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
 671                _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
 672
 673                /* if only edge-sensitive GPIO pin interrupts are
 674                configured, we can unmask the GPIO bank interrupt immediately */
 675                if (!level_mask && !unmasked) {
 676                        unmasked = 1;
 677                        chained_irq_exit(chip, desc);
 678                }
 679
 680                isr |= retrigger;
 681                retrigger = 0;
 682                if (!isr)
 683                        break;
 684
 685                gpio_irq = bank->irq_base;
 686                for (; isr != 0; isr >>= 1, gpio_irq++) {
 687                        int gpio = irq_to_gpio(bank, gpio_irq);
 688
 689                        if (!(isr & 1))
 690                                continue;
 691
 692                        gpio_index = GPIO_INDEX(bank, gpio);
 693
 694                        /*
 695                         * Some chips can't respond to both rising and falling
 696                         * at the same time.  If this irq was requested with
 697                         * both flags, we need to flip the ICR data for the IRQ
 698                         * to respond to the IRQ for the opposite direction.
 699                         * This will be indicated in the bank toggle_mask.
 700                         */
 701                        if (bank->toggle_mask & (1 << gpio_index))
 702                                _toggle_gpio_edge_triggering(bank, gpio_index);
 703
 704                        generic_handle_irq(gpio_irq);
 705                }
 706        }
 707        /* if the bank has any level-sensitive GPIO pin interrupts
 708        configured, we must unmask the bank interrupt only after the
 709        handler(s) have executed, in order to avoid a spurious bank
 710        interrupt */
 711exit:
 712        if (!unmasked)
 713                chained_irq_exit(chip, desc);
 714        pm_runtime_put(bank->dev);
 715}
 716
 717static void gpio_irq_shutdown(struct irq_data *d)
 718{
 719        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 720        unsigned int gpio = irq_to_gpio(bank, d->irq);
 721        unsigned long flags;
 722
 723        spin_lock_irqsave(&bank->lock, flags);
 724        _reset_gpio(bank, gpio);
 725        spin_unlock_irqrestore(&bank->lock, flags);
 726}
 727
 728static void gpio_ack_irq(struct irq_data *d)
 729{
 730        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 731        unsigned int gpio = irq_to_gpio(bank, d->irq);
 732
 733        _clear_gpio_irqstatus(bank, gpio);
 734}
 735
 736static void gpio_mask_irq(struct irq_data *d)
 737{
 738        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 739        unsigned int gpio = irq_to_gpio(bank, d->irq);
 740        unsigned long flags;
 741
 742        spin_lock_irqsave(&bank->lock, flags);
 743        _set_gpio_irqenable(bank, gpio, 0);
 744        _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
 745        spin_unlock_irqrestore(&bank->lock, flags);
 746}
 747
 748static void gpio_unmask_irq(struct irq_data *d)
 749{
 750        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 751        unsigned int gpio = irq_to_gpio(bank, d->irq);
 752        unsigned int irq_mask = GPIO_BIT(bank, gpio);
 753        u32 trigger = irqd_get_trigger_type(d);
 754        unsigned long flags;
 755
 756        spin_lock_irqsave(&bank->lock, flags);
 757        if (trigger)
 758                _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
 759
 760        /* For level-triggered GPIOs, the clearing must be done after
 761         * the HW source is cleared, thus after the handler has run */
 762        if (bank->level_mask & irq_mask) {
 763                _set_gpio_irqenable(bank, gpio, 0);
 764                _clear_gpio_irqstatus(bank, gpio);
 765        }
 766
 767        _set_gpio_irqenable(bank, gpio, 1);
 768        spin_unlock_irqrestore(&bank->lock, flags);
 769}
 770
 771static struct irq_chip gpio_irq_chip = {
 772        .name           = "GPIO",
 773        .irq_shutdown   = gpio_irq_shutdown,
 774        .irq_ack        = gpio_ack_irq,
 775        .irq_mask       = gpio_mask_irq,
 776        .irq_unmask     = gpio_unmask_irq,
 777        .irq_set_type   = gpio_irq_type,
 778        .irq_set_wake   = gpio_wake_enable,
 779};
 780
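/*
 * Illustrative consumer usage (not part of this driver): a client driver
 * would typically map a GPIO to its IRQ and request an edge trigger, e.g.
 *
 *	irq = gpio_to_irq(gpio);
 *	ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "mydev", dev);
 *
 * which reaches gpio_irq_type() above through the irq_set_type hook.
 */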
 781/*---------------------------------------------------------------------*/
 782
 783static int omap_mpuio_suspend_noirq(struct device *dev)
 784{
 785        struct platform_device *pdev = to_platform_device(dev);
 786        struct gpio_bank        *bank = platform_get_drvdata(pdev);
 787        void __iomem            *mask_reg = bank->base +
 788                                        OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 789        unsigned long           flags;
 790
 791        spin_lock_irqsave(&bank->lock, flags);
 792        bank->saved_wakeup = __raw_readl(mask_reg);
 793        __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
 794        spin_unlock_irqrestore(&bank->lock, flags);
 795
 796        return 0;
 797}
 798
 799static int omap_mpuio_resume_noirq(struct device *dev)
 800{
 801        struct platform_device *pdev = to_platform_device(dev);
 802        struct gpio_bank        *bank = platform_get_drvdata(pdev);
 803        void __iomem            *mask_reg = bank->base +
 804                                        OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 805        unsigned long           flags;
 806
 807        spin_lock_irqsave(&bank->lock, flags);
 808        __raw_writel(bank->saved_wakeup, mask_reg);
 809        spin_unlock_irqrestore(&bank->lock, flags);
 810
 811        return 0;
 812}
 813
 814static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
 815        .suspend_noirq = omap_mpuio_suspend_noirq,
 816        .resume_noirq = omap_mpuio_resume_noirq,
 817};
 818
 819/* Use a platform_driver here so the MPUIO bank gets the dev_pm_ops above. */
 820static struct platform_driver omap_mpuio_driver = {
 821        .driver         = {
 822                .name   = "mpuio",
 823                .pm     = &omap_mpuio_dev_pm_ops,
 824        },
 825};
 826
 827static struct platform_device omap_mpuio_device = {
 828        .name           = "mpuio",
 829        .id             = -1,
 830        .dev = {
 831                .driver = &omap_mpuio_driver.driver,
 832        }
 833        /* could list the /proc/iomem resources */
 834};
 835
 836static inline void mpuio_init(struct gpio_bank *bank)
 837{
 838        platform_set_drvdata(&omap_mpuio_device, bank);
 839
 840        if (platform_driver_register(&omap_mpuio_driver) == 0)
 841                (void) platform_device_register(&omap_mpuio_device);
 842}
 843
 844/*---------------------------------------------------------------------*/
 845
 846static int gpio_input(struct gpio_chip *chip, unsigned offset)
 847{
 848        struct gpio_bank *bank;
 849        unsigned long flags;
 850
 851        bank = container_of(chip, struct gpio_bank, chip);
 852        spin_lock_irqsave(&bank->lock, flags);
 853        _set_gpio_direction(bank, offset, 1);
 854        spin_unlock_irqrestore(&bank->lock, flags);
 855        return 0;
 856}
 857
 858static int gpio_is_input(struct gpio_bank *bank, int mask)
 859{
 860        void __iomem *reg = bank->base + bank->regs->direction;
 861
 862        return __raw_readl(reg) & mask;
 863}
 864
 865static int gpio_get(struct gpio_chip *chip, unsigned offset)
 866{
 867        struct gpio_bank *bank;
 868        u32 mask;
 869
 870        bank = container_of(chip, struct gpio_bank, chip);
 871        mask = (1 << offset);
 872
 873        if (gpio_is_input(bank, mask))
 874                return _get_gpio_datain(bank, offset);
 875        else
 876                return _get_gpio_dataout(bank, offset);
 877}
 878
 879static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
 880{
 881        struct gpio_bank *bank;
 882        unsigned long flags;
 883
 884        bank = container_of(chip, struct gpio_bank, chip);
 885        spin_lock_irqsave(&bank->lock, flags);
 886        bank->set_dataout(bank, offset, value);
 887        _set_gpio_direction(bank, offset, 0);
 888        spin_unlock_irqrestore(&bank->lock, flags);
 889        return 0;
 890}
 891
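/*
 * Illustrative consumer usage (not part of this driver): after claiming a
 * line, a client driver can request hardware debouncing through gpiolib,
 * e.g.
 *
 *	gpio_request(gpio, "keypad");
 *	gpio_direction_input(gpio);
 *	gpio_set_debounce(gpio, 100);	(value in usecs, rounded to 31 us steps)
 *
 * gpiolib then invokes this set_debounce hook.
 */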
 892static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
 893                unsigned debounce)
 894{
 895        struct gpio_bank *bank;
 896        unsigned long flags;
 897
 898        bank = container_of(chip, struct gpio_bank, chip);
 899
 900        if (!bank->dbck) {
 901                bank->dbck = clk_get(bank->dev, "dbclk");
 902                if (IS_ERR(bank->dbck))
 903                        dev_err(bank->dev, "Could not get gpio dbck\n");
 904        }
 905
 906        spin_lock_irqsave(&bank->lock, flags);
 907        _set_gpio_debounce(bank, offset, debounce);
 908        spin_unlock_irqrestore(&bank->lock, flags);
 909
 910        return 0;
 911}
 912
 913static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 914{
 915        struct gpio_bank *bank;
 916        unsigned long flags;
 917
 918        bank = container_of(chip, struct gpio_bank, chip);
 919        spin_lock_irqsave(&bank->lock, flags);
 920        bank->set_dataout(bank, offset, value);
 921        spin_unlock_irqrestore(&bank->lock, flags);
 922}
 923
 924static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
 925{
 926        struct gpio_bank *bank;
 927
 928        bank = container_of(chip, struct gpio_bank, chip);
 929        return bank->irq_base + offset;
 930}
 931
 932/*---------------------------------------------------------------------*/
 933
 934static void __init omap_gpio_show_rev(struct gpio_bank *bank)
 935{
 936        static bool called;
 937        u32 rev;
 938
 939        if (called || bank->regs->revision == USHRT_MAX)
 940                return;
 941
 942        rev = __raw_readw(bank->base + bank->regs->revision);
 943        pr_info("OMAP GPIO hardware version %d.%d\n",
 944                (rev >> 4) & 0x0f, rev & 0x0f);
 945
 946        called = true;
 947}
 948
 949/* This lock class tells lockdep that GPIO irqs are in a different
 950 * category than their parents, so it won't report false recursion.
 951 */
 952static struct lock_class_key gpio_lock_class;
 953
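/*
 * One-time init of a bank: mask all interrupts (honouring irqenable_inv),
 * clear any pending status, disable debouncing, cache the reset OE value
 * and, where a ctrl register exists, ungate the interface clock and
 * enable the module.
 */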
 954static void omap_gpio_mod_init(struct gpio_bank *bank)
 955{
 956        void __iomem *base = bank->base;
 957        u32 l = 0xffffffff;
 958
 959        if (bank->width == 16)
 960                l = 0xffff;
 961
 962        if (bank->is_mpuio) {
 963                __raw_writel(l, bank->base + bank->regs->irqenable);
 964                return;
 965        }
 966
 967        _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
 968        _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
 969        if (bank->regs->debounce_en)
 970                __raw_writel(0, base + bank->regs->debounce_en);
 971
 972        /* Save OE default value (0xffffffff) in the context */
 973        bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
 974         /* Initialize interface clk ungated, module enabled */
 975        if (bank->regs->ctrl)
 976                __raw_writel(0, base + bank->regs->ctrl);
 977}
 978
 979static __devinit void
 980omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
 981                    unsigned int num)
 982{
 983        struct irq_chip_generic *gc;
 984        struct irq_chip_type *ct;
 985
 986        gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
 987                                    handle_simple_irq);
 988        if (!gc) {
 989                dev_err(bank->dev, "Memory alloc failed for gc\n");
 990                return;
 991        }
 992
 993        ct = gc->chip_types;
 994
 995        /* NOTE: No ack required, reading IRQ status clears it. */
 996        ct->chip.irq_mask = irq_gc_mask_set_bit;
 997        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 998        ct->chip.irq_set_type = gpio_irq_type;
 999
1000        if (bank->regs->wkup_en)
1001                ct->chip.irq_set_wake = gpio_wake_enable;
1002
1003        ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1004        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1005                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1006}
1007
1008static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1009{
1010        int j;
1011        static int gpio;
1012
1013        /*
1014         * REVISIT eventually switch from OMAP-specific gpio structs
1015         * over to the generic ones
1016         */
1017        bank->chip.request = omap_gpio_request;
1018        bank->chip.free = omap_gpio_free;
1019        bank->chip.direction_input = gpio_input;
1020        bank->chip.get = gpio_get;
1021        bank->chip.direction_output = gpio_output;
1022        bank->chip.set_debounce = gpio_debounce;
1023        bank->chip.set = gpio_set;
1024        bank->chip.to_irq = gpio_2irq;
1025        if (bank->is_mpuio) {
1026                bank->chip.label = "mpuio";
1027                if (bank->regs->wkup_en)
1028                        bank->chip.dev = &omap_mpuio_device.dev;
1029                bank->chip.base = OMAP_MPUIO(0);
1030        } else {
1031                bank->chip.label = "gpio";
1032                bank->chip.base = gpio;
1033                gpio += bank->width;
1034        }
1035        bank->chip.ngpio = bank->width;
1036
1037        gpiochip_add(&bank->chip);
1038
1039        for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
1040                irq_set_lockdep_class(j, &gpio_lock_class);
1041                irq_set_chip_data(j, bank);
1042                if (bank->is_mpuio) {
1043                        omap_mpuio_alloc_gc(bank, j, bank->width);
1044                } else {
1045                        irq_set_chip(j, &gpio_irq_chip);
1046                        irq_set_handler(j, handle_simple_irq);
1047                        set_irq_flags(j, IRQF_VALID);
1048                }
1049        }
1050        irq_set_chained_handler(bank->irq, gpio_irq_handler);
1051        irq_set_handler_data(bank->irq, bank);
1052}
1053
1054static const struct of_device_id omap_gpio_match[];
1055
1056static int __devinit omap_gpio_probe(struct platform_device *pdev)
1057{
1058        struct device *dev = &pdev->dev;
1059        struct device_node *node = dev->of_node;
1060        const struct of_device_id *match;
1061        struct omap_gpio_platform_data *pdata;
1062        struct resource *res;
1063        struct gpio_bank *bank;
1064        int ret = 0;
1065
1066        match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1067
1068        pdata = match ? match->data : dev->platform_data;
1069        if (!pdata)
1070                return -EINVAL;
1071
1072        bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
1073        if (!bank) {
1074                dev_err(dev, "Memory alloc failed\n");
1075                return -ENOMEM;
1076        }
1077
1078        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1079        if (unlikely(!res)) {
1080                dev_err(dev, "Invalid IRQ resource\n");
1081                return -ENODEV;
1082        }
1083
1084        bank->irq = res->start;
1085        bank->dev = dev;
1086        bank->dbck_flag = pdata->dbck_flag;
1087        bank->stride = pdata->bank_stride;
1088        bank->width = pdata->bank_width;
1089        bank->is_mpuio = pdata->is_mpuio;
1090        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1091        bank->loses_context = pdata->loses_context;
1092        bank->get_context_loss_count = pdata->get_context_loss_count;
1093        bank->regs = pdata->regs;
1094#ifdef CONFIG_OF_GPIO
1095        bank->chip.of_node = of_node_get(node);
1096#endif
1097
1098        bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1099        if (bank->irq_base < 0) {
1100                dev_err(dev, "Couldn't allocate IRQ numbers\n");
1101                return -ENODEV;
1102        }
1103
1104        bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
1105                                             0, &irq_domain_simple_ops, NULL);
1106
1107        if (bank->regs->set_dataout && bank->regs->clr_dataout)
1108                bank->set_dataout = _set_gpio_dataout_reg;
1109        else
1110                bank->set_dataout = _set_gpio_dataout_mask;
1111
1112        spin_lock_init(&bank->lock);
1113
1114        /* Static mapping, never released */
1115        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1116        if (unlikely(!res)) {
1117                dev_err(dev, "Invalid mem resource\n");
1118                return -ENODEV;
1119        }
1120
1121        if (!devm_request_mem_region(dev, res->start, resource_size(res),
1122                                     pdev->name)) {
1123                dev_err(dev, "Region already claimed\n");
1124                return -EBUSY;
1125        }
1126
1127        bank->base = devm_ioremap(dev, res->start, resource_size(res));
1128        if (!bank->base) {
1129                dev_err(dev, "Could not ioremap\n");
1130                return -ENOMEM;
1131        }
1132
1133        platform_set_drvdata(pdev, bank);
1134
1135        pm_runtime_enable(bank->dev);
1136        pm_runtime_irq_safe(bank->dev);
1137        pm_runtime_get_sync(bank->dev);
1138
1139        if (bank->is_mpuio)
1140                mpuio_init(bank);
1141
1142        omap_gpio_mod_init(bank);
1143        omap_gpio_chip_init(bank);
1144        omap_gpio_show_rev(bank);
1145
1146        pm_runtime_put(bank->dev);
1147
1148        list_add_tail(&bank->node, &omap_gpio_list);
1149
1150        return ret;
1151}
1152
1153#ifdef CONFIG_ARCH_OMAP2PLUS
1154
1155#if defined(CONFIG_PM_SLEEP)
1156static int omap_gpio_suspend(struct device *dev)
1157{
1158        struct platform_device *pdev = to_platform_device(dev);
1159        struct gpio_bank *bank = platform_get_drvdata(pdev);
1160        void __iomem *base = bank->base;
1161        void __iomem *wakeup_enable;
1162        unsigned long flags;
1163
1164        if (!bank->mod_usage || !bank->loses_context)
1165                return 0;
1166
1167        if (!bank->regs->wkup_en || !bank->suspend_wakeup)
1168                return 0;
1169
1170        wakeup_enable = bank->base + bank->regs->wkup_en;
1171
1172        spin_lock_irqsave(&bank->lock, flags);
1173        bank->saved_wakeup = __raw_readl(wakeup_enable);
1174        _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
1175        _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
1176        spin_unlock_irqrestore(&bank->lock, flags);
1177
1178        return 0;
1179}
1180
1181static int omap_gpio_resume(struct device *dev)
1182{
1183        struct platform_device *pdev = to_platform_device(dev);
1184        struct gpio_bank *bank = platform_get_drvdata(pdev);
1185        void __iomem *base = bank->base;
1186        unsigned long flags;
1187
1188        if (!bank->mod_usage || !bank->loses_context)
1189                return 0;
1190
1191        if (!bank->regs->wkup_en || !bank->saved_wakeup)
1192                return 0;
1193
1194        spin_lock_irqsave(&bank->lock, flags);
1195        _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
1196        _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
1197        spin_unlock_irqrestore(&bank->lock, flags);
1198
1199        return 0;
1200}
1201#endif /* CONFIG_PM_SLEEP */
1202
1203#if defined(CONFIG_PM_RUNTIME)
1204static void omap_gpio_restore_context(struct gpio_bank *bank);
1205
1206static int omap_gpio_runtime_suspend(struct device *dev)
1207{
1208        struct platform_device *pdev = to_platform_device(dev);
1209        struct gpio_bank *bank = platform_get_drvdata(pdev);
1210        u32 l1 = 0, l2 = 0;
1211        unsigned long flags;
1212        u32 wake_low, wake_hi;
1213
1214        spin_lock_irqsave(&bank->lock, flags);
1215
1216        /*
1217         * Only edges can generate a wakeup event to the PRCM.
1218         *
1219         * Therefore, ensure any wake-up capable GPIOs have
1220         * edge-detection enabled before going idle to ensure a wakeup
1221         * to the PRCM is generated on a GPIO transition. (c.f. 34xx
1222         * NDA TRM 25.5.3.1)
1223         *
1224         * The normal values will be restored upon ->runtime_resume()
1225         * by writing back the values saved in bank->context.
1226         */
1227        wake_low = bank->context.leveldetect0 & bank->context.wake_en;
1228        if (wake_low)
1229                __raw_writel(wake_low | bank->context.fallingdetect,
1230                             bank->base + bank->regs->fallingdetect);
1231        wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
1232        if (wake_hi)
1233                __raw_writel(wake_hi | bank->context.risingdetect,
1234                             bank->base + bank->regs->risingdetect);
1235
1236        if (bank->power_mode != OFF_MODE) {
1237                bank->power_mode = 0;
1238                goto update_gpio_context_count;
1239        }
1240        /*
1241         * If going to OFF, remove triggering for all
1242         * non-wakeup GPIOs.  Otherwise spurious IRQs will be
1243         * generated.  See OMAP2420 Errata item 1.101.
1244         */
1245        bank->saved_datain = __raw_readl(bank->base +
1246                                                bank->regs->datain);
1247        l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
1248        l2 = __raw_readl(bank->base + bank->regs->risingdetect);
1249
1250        bank->saved_fallingdetect = l1;
1251        bank->saved_risingdetect = l2;
1252        l1 &= ~bank->enabled_non_wakeup_gpios;
1253        l2 &= ~bank->enabled_non_wakeup_gpios;
1254
1255        __raw_writel(l1, bank->base + bank->regs->fallingdetect);
1256        __raw_writel(l2, bank->base + bank->regs->risingdetect);
1257
1258        bank->workaround_enabled = true;
1259
1260update_gpio_context_count:
1261        if (bank->get_context_loss_count)
1262                bank->context_loss_count =
1263                                bank->get_context_loss_count(bank->dev);
1264
1265        _gpio_dbck_disable(bank);
1266        spin_unlock_irqrestore(&bank->lock, flags);
1267
1268        return 0;
1269}
1270
1271static int omap_gpio_runtime_resume(struct device *dev)
1272{
1273        struct platform_device *pdev = to_platform_device(dev);
1274        struct gpio_bank *bank = platform_get_drvdata(pdev);
1275        int context_lost_cnt_after;
1276        u32 l = 0, gen, gen0, gen1;
1277        unsigned long flags;
1278
1279        spin_lock_irqsave(&bank->lock, flags);
1280        _gpio_dbck_enable(bank);
1281
1282        /*
1283         * In ->runtime_suspend(), level-triggered, wakeup-enabled
1284         * GPIOs were set to edge trigger also in order to be able to
1285         * generate a PRCM wakeup.  Here we restore the
1286         * pre-runtime_suspend() values for edge triggering.
1287         */
1288        __raw_writel(bank->context.fallingdetect,
1289                     bank->base + bank->regs->fallingdetect);
1290        __raw_writel(bank->context.risingdetect,
1291                     bank->base + bank->regs->risingdetect);
1292
1293        if (!bank->workaround_enabled) {
1294                spin_unlock_irqrestore(&bank->lock, flags);
1295                return 0;
1296        }
1297
1298        if (bank->get_context_loss_count) {
1299                context_lost_cnt_after =
1300                        bank->get_context_loss_count(bank->dev);
1301                if (context_lost_cnt_after != bank->context_loss_count ||
1302                                                !context_lost_cnt_after) {
1303                        omap_gpio_restore_context(bank);
1304                } else {
1305                        spin_unlock_irqrestore(&bank->lock, flags);
1306                        return 0;
1307                }
1308        }
1309
1310        __raw_writel(bank->saved_fallingdetect,
1311                        bank->base + bank->regs->fallingdetect);
1312        __raw_writel(bank->saved_risingdetect,
1313                        bank->base + bank->regs->risingdetect);
1314        l = __raw_readl(bank->base + bank->regs->datain);
1315
1316        /*
1317         * Check if any of the non-wakeup interrupt GPIOs have changed
1318         * state.  If so, generate an IRQ by software.  This is
1319         * horribly racy, but it's the best we can do to work around
1320         * this silicon bug.
1321         */
1322        l ^= bank->saved_datain;
1323        l &= bank->enabled_non_wakeup_gpios;
1324
1325        /*
1326         * No need to generate IRQs for the rising edge for gpio IRQs
1327         * configured with falling edge only; and vice versa.
1328         */
1329        gen0 = l & bank->saved_fallingdetect;
1330        gen0 &= bank->saved_datain;
1331
1332        gen1 = l & bank->saved_risingdetect;
1333        gen1 &= ~(bank->saved_datain);
1334
1335        /* FIXME: Consider GPIO IRQs with level detections properly! */
1336        gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
1337        /* Consider all GPIO IRQs needed to be updated */
1338        gen |= gen0 | gen1;
1339
1340        if (gen) {
1341                u32 old0, old1;
1342
1343                old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
1344                old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
1345
1346                if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
1347                        __raw_writel(old0 | gen, bank->base +
1348                                                bank->regs->leveldetect0);
1349                        __raw_writel(old1 | gen, bank->base +
1350                                                bank->regs->leveldetect1);
1351                }
1352
1353                if (cpu_is_omap44xx()) {
1354                        __raw_writel(old0 | l, bank->base +
1355                                                bank->regs->leveldetect0);
1356                        __raw_writel(old1 | l, bank->base +
1357                                                bank->regs->leveldetect1);
1358                }
1359                __raw_writel(old0, bank->base + bank->regs->leveldetect0);
1360                __raw_writel(old1, bank->base + bank->regs->leveldetect1);
1361        }
1362
1363        bank->workaround_enabled = false;
1364        spin_unlock_irqrestore(&bank->lock, flags);
1365
1366        return 0;
1367}
1368#endif /* CONFIG_PM_RUNTIME */
1369
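/*
 * Idle notification hooks used by the OMAP platform PM code: before idle,
 * hand every claimed, context-losing bank to runtime PM so the off-mode
 * handling in omap_gpio_runtime_suspend() runs; after idle, take them back.
 */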
1370void omap2_gpio_prepare_for_idle(int pwr_mode)
1371{
1372        struct gpio_bank *bank;
1373
1374        list_for_each_entry(bank, &omap_gpio_list, node) {
1375                if (!bank->mod_usage || !bank->loses_context)
1376                        continue;
1377
1378                bank->power_mode = pwr_mode;
1379
1380                pm_runtime_put_sync_suspend(bank->dev);
1381        }
1382}
1383
1384void omap2_gpio_resume_after_idle(void)
1385{
1386        struct gpio_bank *bank;
1387
1388        list_for_each_entry(bank, &omap_gpio_list, node) {
1389                if (!bank->mod_usage || !bank->loses_context)
1390                        continue;
1391
1392                pm_runtime_get_sync(bank->dev);
1393        }
1394}
1395
1396#if defined(CONFIG_PM_RUNTIME)
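/*
 * Restore the bank registers from the cached bank->context after the
 * power domain has lost context (e.g. coming back from off-mode).
 */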
1397static void omap_gpio_restore_context(struct gpio_bank *bank)
1398{
1399        __raw_writel(bank->context.wake_en,
1400                                bank->base + bank->regs->wkup_en);
1401        __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
1402        __raw_writel(bank->context.leveldetect0,
1403                                bank->base + bank->regs->leveldetect0);
1404        __raw_writel(bank->context.leveldetect1,
1405                                bank->base + bank->regs->leveldetect1);
1406        __raw_writel(bank->context.risingdetect,
1407                                bank->base + bank->regs->risingdetect);
1408        __raw_writel(bank->context.fallingdetect,
1409                                bank->base + bank->regs->fallingdetect);
1410        if (bank->regs->set_dataout && bank->regs->clr_dataout)
1411                __raw_writel(bank->context.dataout,
1412                                bank->base + bank->regs->set_dataout);
1413        else
1414                __raw_writel(bank->context.dataout,
1415                                bank->base + bank->regs->dataout);
1416        __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
1417
1418        if (bank->dbck_enable_mask) {
1419                __raw_writel(bank->context.debounce, bank->base +
1420                                        bank->regs->debounce);
1421                __raw_writel(bank->context.debounce_en,
1422                                        bank->base + bank->regs->debounce_en);
1423        }
1424
1425        __raw_writel(bank->context.irqenable1,
1426                                bank->base + bank->regs->irqenable);
1427        __raw_writel(bank->context.irqenable2,
1428                                bank->base + bank->regs->irqenable2);
1429}
1430#endif /* CONFIG_PM_RUNTIME */
1431#else
1432#define omap_gpio_suspend NULL
1433#define omap_gpio_resume NULL
1434#define omap_gpio_runtime_suspend NULL
1435#define omap_gpio_runtime_resume NULL
1436#endif
1437
1438static const struct dev_pm_ops gpio_pm_ops = {
1439        SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
1440        SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1441                                                                        NULL)
1442};
1443
1444#if defined(CONFIG_OF)
1445static struct omap_gpio_reg_offs omap2_gpio_regs = {
1446        .revision =             OMAP24XX_GPIO_REVISION,
1447        .direction =            OMAP24XX_GPIO_OE,
1448        .datain =               OMAP24XX_GPIO_DATAIN,
1449        .dataout =              OMAP24XX_GPIO_DATAOUT,
1450        .set_dataout =          OMAP24XX_GPIO_SETDATAOUT,
1451        .clr_dataout =          OMAP24XX_GPIO_CLEARDATAOUT,
1452        .irqstatus =            OMAP24XX_GPIO_IRQSTATUS1,
1453        .irqstatus2 =           OMAP24XX_GPIO_IRQSTATUS2,
1454        .irqenable =            OMAP24XX_GPIO_IRQENABLE1,
1455        .irqenable2 =           OMAP24XX_GPIO_IRQENABLE2,
1456        .set_irqenable =        OMAP24XX_GPIO_SETIRQENABLE1,
1457        .clr_irqenable =        OMAP24XX_GPIO_CLEARIRQENABLE1,
1458        .debounce =             OMAP24XX_GPIO_DEBOUNCE_VAL,
1459        .debounce_en =          OMAP24XX_GPIO_DEBOUNCE_EN,
1460        .ctrl =                 OMAP24XX_GPIO_CTRL,
1461        .wkup_en =              OMAP24XX_GPIO_WAKE_EN,
1462        .leveldetect0 =         OMAP24XX_GPIO_LEVELDETECT0,
1463        .leveldetect1 =         OMAP24XX_GPIO_LEVELDETECT1,
1464        .risingdetect =         OMAP24XX_GPIO_RISINGDETECT,
1465        .fallingdetect =        OMAP24XX_GPIO_FALLINGDETECT,
1466};
1467
1468static struct omap_gpio_reg_offs omap4_gpio_regs = {
1469        .revision =             OMAP4_GPIO_REVISION,
1470        .direction =            OMAP4_GPIO_OE,
1471        .datain =               OMAP4_GPIO_DATAIN,
1472        .dataout =              OMAP4_GPIO_DATAOUT,
1473        .set_dataout =          OMAP4_GPIO_SETDATAOUT,
1474        .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
1475        .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
1476        .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
1477        .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
1478        .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
1479        .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
1480        .clr_irqenable =        OMAP4_GPIO_IRQSTATUSCLR0,
1481        .debounce =             OMAP4_GPIO_DEBOUNCINGTIME,
1482        .debounce_en =          OMAP4_GPIO_DEBOUNCENABLE,
1483        .ctrl =                 OMAP4_GPIO_CTRL,
1484        .wkup_en =              OMAP4_GPIO_IRQWAKEN0,
1485        .leveldetect0 =         OMAP4_GPIO_LEVELDETECT0,
1486        .leveldetect1 =         OMAP4_GPIO_LEVELDETECT1,
1487        .risingdetect =         OMAP4_GPIO_RISINGDETECT,
1488        .fallingdetect =        OMAP4_GPIO_FALLINGDETECT,
1489};
1490
1491static struct omap_gpio_platform_data omap2_pdata = {
1492        .regs = &omap2_gpio_regs,
1493        .bank_width = 32,
1494        .dbck_flag = false,
1495};
1496
1497static struct omap_gpio_platform_data omap3_pdata = {
1498        .regs = &omap2_gpio_regs,
1499        .bank_width = 32,
1500        .dbck_flag = true,
1501};
1502
1503static struct omap_gpio_platform_data omap4_pdata = {
1504        .regs = &omap4_gpio_regs,
1505        .bank_width = 32,
1506        .dbck_flag = true,
1507};
1508
1509static const struct of_device_id omap_gpio_match[] = {
1510        {
1511                .compatible = "ti,omap4-gpio",
1512                .data = &omap4_pdata,
1513        },
1514        {
1515                .compatible = "ti,omap3-gpio",
1516                .data = &omap3_pdata,
1517        },
1518        {
1519                .compatible = "ti,omap2-gpio",
1520                .data = &omap2_pdata,
1521        },
1522        { },
1523};
1524MODULE_DEVICE_TABLE(of, omap_gpio_match);
1525#endif
1526
1527static struct platform_driver omap_gpio_driver = {
1528        .probe          = omap_gpio_probe,
1529        .driver         = {
1530                .name   = "omap_gpio",
1531                .pm     = &gpio_pm_ops,
1532                .of_match_table = of_match_ptr(omap_gpio_match),
1533        },
1534};
1535
1536/*
1537 * The gpio driver must be registered before the
1538 * machine_init functions access the gpio APIs.
1539 * Hence omap_gpio_drv_reg() is a postcore_initcall.
1540 */
1541static int __init omap_gpio_drv_reg(void)
1542{
1543        return platform_driver_register(&omap_gpio_driver);
1544}
1545postcore_initcall(omap_gpio_drv_reg);
1546