linux/drivers/gpio/gpio-omap.c
   1/*
   2 * Support functions for OMAP GPIO
   3 *
   4 * Copyright (C) 2003-2005 Nokia Corporation
   5 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
   6 *
   7 * Copyright (C) 2009 Texas Instruments
   8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/init.h>
  16#include <linux/module.h>
  17#include <linux/interrupt.h>
  18#include <linux/syscore_ops.h>
  19#include <linux/err.h>
  20#include <linux/clk.h>
  21#include <linux/io.h>
  22#include <linux/device.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/pm.h>
  25#include <linux/of.h>
  26#include <linux/of_device.h>
  27#include <linux/irqdomain.h>
  28#include <linux/irqchip/chained_irq.h>
  29#include <linux/gpio.h>
  30#include <linux/platform_data/gpio-omap.h>
  31
  32#define OFF_MODE        1
  33
  34static LIST_HEAD(omap_gpio_list);
  35
  36struct gpio_regs {
  37        u32 irqenable1;
  38        u32 irqenable2;
  39        u32 wake_en;
  40        u32 ctrl;
  41        u32 oe;
  42        u32 leveldetect0;
  43        u32 leveldetect1;
  44        u32 risingdetect;
  45        u32 fallingdetect;
  46        u32 dataout;
  47        u32 debounce;
  48        u32 debounce_en;
  49};
  50
  51struct gpio_bank {
  52        struct list_head node;
  53        void __iomem *base;
  54        u16 irq;
  55        struct irq_domain *domain;
  56        u32 non_wakeup_gpios;
  57        u32 enabled_non_wakeup_gpios;
  58        struct gpio_regs context;
  59        u32 saved_datain;
  60        u32 level_mask;
  61        u32 toggle_mask;
  62        spinlock_t lock;
  63        struct gpio_chip chip;
  64        struct clk *dbck;
  65        u32 mod_usage;
  66        u32 irq_usage;
  67        u32 dbck_enable_mask;
  68        bool dbck_enabled;
  69        struct device *dev;
  70        bool is_mpuio;
  71        bool dbck_flag;
  72        bool loses_context;
  73        bool context_valid;
  74        int stride;
  75        u32 width;
  76        int context_loss_count;
  77        int power_mode;
  78        bool workaround_enabled;
  79
  80        void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
  81        int (*get_context_loss_count)(struct device *dev);
  82
  83        struct omap_gpio_reg_offs *regs;
  84};
  85
  86#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
  87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
  88#define GPIO_MOD_CTRL_BIT       BIT(0)
  89
  90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
  91#define LINE_USED(line, offset) (line & (1 << offset))
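/*
 * Worked example (illustrative only): on a 32-bit wide bank whose
 * gpio_chip base is 32, global GPIO 37 gives GPIO_INDEX() = 37 % 32 = 5
 * and GPIO_BIT() = 1 << 5 = 0x20, i.e. bit 5 in the bank's registers.
 * LINE_USED(bank->mod_usage, 5) tests that same bit in the bookkeeping
 * masks, and BANK_USED() is true while any line or IRQ is claimed.
 */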
  92
  93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
  94{
  95        return bank->chip.base + gpio_irq;
  96}
  97
  98static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
  99{
 100        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 101
 102        return irq_find_mapping(bank->domain, offset);
 103}
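/*
 * Illustrative consumer-side sketch (not part of this driver): a caller
 * that already owns a line as an input typically reaches the mapping
 * above through gpio_to_irq(). The GPIO number and handler name below
 * are hypothetical.
 *
 *	int irq = gpio_to_irq(139);
 *
 *	ret = request_irq(irq, my_button_isr, IRQF_TRIGGER_FALLING,
 *			  "my-button", NULL);
 */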
 104
 105static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
 106{
 107        void __iomem *reg = bank->base;
 108        u32 l;
 109
 110        reg += bank->regs->direction;
 111        l = __raw_readl(reg);
 112        if (is_input)
 113                l |= 1 << gpio;
 114        else
 115                l &= ~(1 << gpio);
 116        __raw_writel(l, reg);
 117        bank->context.oe = l;
 118}
 119
 120
  121/* set data out value using dedicated set/clear register */
 122static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
 123{
 124        void __iomem *reg = bank->base;
 125        u32 l = GPIO_BIT(bank, gpio);
 126
 127        if (enable) {
 128                reg += bank->regs->set_dataout;
 129                bank->context.dataout |= l;
 130        } else {
 131                reg += bank->regs->clr_dataout;
 132                bank->context.dataout &= ~l;
 133        }
 134
 135        __raw_writel(l, reg);
 136}
 137
 138/* set data out value using mask register */
 139static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
 140{
 141        void __iomem *reg = bank->base + bank->regs->dataout;
 142        u32 gpio_bit = GPIO_BIT(bank, gpio);
 143        u32 l;
 144
 145        l = __raw_readl(reg);
 146        if (enable)
 147                l |= gpio_bit;
 148        else
 149                l &= ~gpio_bit;
 150        __raw_writel(l, reg);
 151        bank->context.dataout = l;
 152}
 153
 154static int _get_gpio_datain(struct gpio_bank *bank, int offset)
 155{
 156        void __iomem *reg = bank->base + bank->regs->datain;
 157
 158        return (__raw_readl(reg) & (1 << offset)) != 0;
 159}
 160
 161static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
 162{
 163        void __iomem *reg = bank->base + bank->regs->dataout;
 164
 165        return (__raw_readl(reg) & (1 << offset)) != 0;
 166}
 167
 168static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
 169{
 170        int l = __raw_readl(base + reg);
 171
 172        if (set)
 173                l |= mask;
 174        else
 175                l &= ~mask;
 176
 177        __raw_writel(l, base + reg);
 178}
 179
 180static inline void _gpio_dbck_enable(struct gpio_bank *bank)
 181{
 182        if (bank->dbck_enable_mask && !bank->dbck_enabled) {
 183                clk_enable(bank->dbck);
 184                bank->dbck_enabled = true;
 185
 186                __raw_writel(bank->dbck_enable_mask,
 187                             bank->base + bank->regs->debounce_en);
 188        }
 189}
 190
 191static inline void _gpio_dbck_disable(struct gpio_bank *bank)
 192{
 193        if (bank->dbck_enable_mask && bank->dbck_enabled) {
 194                /*
  195                 * Disable debounce before cutting its clock. If debounce is
  196                 * enabled but the clock is not, the GPIO module seems unable
  197                 * to detect events and generate interrupts, at least on OMAP3.
 198                 */
 199                __raw_writel(0, bank->base + bank->regs->debounce_en);
 200
 201                clk_disable(bank->dbck);
 202                bank->dbck_enabled = false;
 203        }
 204}
 205
 206/**
 207 * _set_gpio_debounce - low level gpio debounce time
 208 * @bank: the gpio bank we're acting upon
  209 * @gpio: the gpio number on this @bank
  210 * @debounce: debounce time to use, in microseconds
  211 *
  212 * OMAP's debounce time is programmed in 31 us steps, so the requested
  213 * time is converted to those units and clamped to the supported range.
  214 */
 215static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
 216                unsigned debounce)
 217{
 218        void __iomem            *reg;
 219        u32                     val;
 220        u32                     l;
 221
 222        if (!bank->dbck_flag)
 223                return;
 224
 225        if (debounce < 32)
 226                debounce = 0x01;
 227        else if (debounce > 7936)
 228                debounce = 0xff;
 229        else
 230                debounce = (debounce / 0x1f) - 1;
 231
 232        l = GPIO_BIT(bank, gpio);
 233
 234        clk_enable(bank->dbck);
 235        reg = bank->base + bank->regs->debounce;
 236        __raw_writel(debounce, reg);
 237
 238        reg = bank->base + bank->regs->debounce_en;
 239        val = __raw_readl(reg);
 240
 241        if (debounce)
 242                val |= l;
 243        else
 244                val &= ~l;
 245        bank->dbck_enable_mask = val;
 246
 247        __raw_writel(val, reg);
 248        clk_disable(bank->dbck);
 249        /*
  250         * Enable the debounce clock for this module.
  251         * This call is mandatory because when omap_gpio_request() calls
  252         * *_runtime_get_sync(), _gpio_dbck_enable() in the runtime
  253         * callback fails to turn on dbck: dbck_enable_mask is still not
  254         * initialized at that point. Therefore we have to enable dbck
  255         * here.
 256         */
 257        _gpio_dbck_enable(bank);
 258        if (bank->dbck_enable_mask) {
 259                bank->context.debounce = debounce;
 260                bank->context.debounce_en = val;
 261        }
 262}
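/*
 * Worked example of the conversion above (illustrative): a request for
 * 200 us yields 200 / 0x1f - 1 = 5; the OMAP TRM specifies the debounce
 * period as (DEBOUNCINGTIME + 1) * 31 us, so the line is debounced for
 * roughly 186 us. Requests below 32 us are clamped to the minimum value
 * 0x01 and requests above 7936 us to the maximum 0xff.
 */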
 263
 264/**
 265 * _clear_gpio_debounce - clear debounce settings for a gpio
 266 * @bank: the gpio bank we're acting upon
  267 * @gpio: the gpio number on this @bank
  268 *
  269 * If a gpio is using debounce, clear its debounce enable bit. If this is
  270 * the only gpio in the bank using debounce, also clear the debounce time
  271 * and disable the debounce clock, since no line in the bank needs it any
  272 * more.
 273 */
 274static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
 275{
 276        u32 gpio_bit = GPIO_BIT(bank, gpio);
 277
 278        if (!bank->dbck_flag)
 279                return;
 280
 281        if (!(bank->dbck_enable_mask & gpio_bit))
 282                return;
 283
 284        bank->dbck_enable_mask &= ~gpio_bit;
 285        bank->context.debounce_en &= ~gpio_bit;
 286        __raw_writel(bank->context.debounce_en,
 287                     bank->base + bank->regs->debounce_en);
 288
 289        if (!bank->dbck_enable_mask) {
 290                bank->context.debounce = 0;
 291                __raw_writel(bank->context.debounce, bank->base +
 292                             bank->regs->debounce);
 293                clk_disable(bank->dbck);
 294                bank->dbck_enabled = false;
 295        }
 296}
 297
 298static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
 299                                                unsigned trigger)
 300{
 301        void __iomem *base = bank->base;
 302        u32 gpio_bit = 1 << gpio;
 303
 304        _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
 305                  trigger & IRQ_TYPE_LEVEL_LOW);
 306        _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
 307                  trigger & IRQ_TYPE_LEVEL_HIGH);
 308        _gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
 309                  trigger & IRQ_TYPE_EDGE_RISING);
 310        _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
 311                  trigger & IRQ_TYPE_EDGE_FALLING);
 312
 313        bank->context.leveldetect0 =
 314                        __raw_readl(bank->base + bank->regs->leveldetect0);
 315        bank->context.leveldetect1 =
 316                        __raw_readl(bank->base + bank->regs->leveldetect1);
 317        bank->context.risingdetect =
 318                        __raw_readl(bank->base + bank->regs->risingdetect);
 319        bank->context.fallingdetect =
 320                        __raw_readl(bank->base + bank->regs->fallingdetect);
 321
 322        if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
 323                _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
 324                bank->context.wake_en =
 325                        __raw_readl(bank->base + bank->regs->wkup_en);
 326        }
 327
  328        /* This part must always be executed for OMAP34xx and OMAP44xx */
 329        if (!bank->regs->irqctrl) {
  330                /* On omap24xx, proceed only when a valid GPIO bit is set */
 331                if (bank->non_wakeup_gpios) {
 332                        if (!(bank->non_wakeup_gpios & gpio_bit))
 333                                goto exit;
 334                }
 335
 336                /*
 337                 * Log the edge gpio and manually trigger the IRQ
  338                 * after resume if the input level changes, so that an
  339                 * irq is not lost during PER RET/OFF mode. This applies
  340                 * to omap2 non-wakeup gpios and to all omap3 gpios.
 341                 */
 342                if (trigger & IRQ_TYPE_EDGE_BOTH)
 343                        bank->enabled_non_wakeup_gpios |= gpio_bit;
 344                else
 345                        bank->enabled_non_wakeup_gpios &= ~gpio_bit;
 346        }
 347
 348exit:
 349        bank->level_mask =
 350                __raw_readl(bank->base + bank->regs->leveldetect0) |
 351                __raw_readl(bank->base + bank->regs->leveldetect1);
 352}
 353
 354#ifdef CONFIG_ARCH_OMAP1
 355/*
 356 * This only applies to chips that can't do both rising and falling edge
 357 * detection at once.  For all other chips, this function is a noop.
 358 */
 359static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
 360{
 361        void __iomem *reg = bank->base;
 362        u32 l = 0;
 363
 364        if (!bank->regs->irqctrl)
 365                return;
 366
 367        reg += bank->regs->irqctrl;
 368
 369        l = __raw_readl(reg);
 370        if ((l >> gpio) & 1)
 371                l &= ~(1 << gpio);
 372        else
 373                l |= 1 << gpio;
 374
 375        __raw_writel(l, reg);
 376}
 377#else
 378static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
 379#endif
 380
 381static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
 382                                                        unsigned trigger)
 383{
 384        void __iomem *reg = bank->base;
 385        void __iomem *base = bank->base;
 386        u32 l = 0;
 387
 388        if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
 389                set_gpio_trigger(bank, gpio, trigger);
 390        } else if (bank->regs->irqctrl) {
 391                reg += bank->regs->irqctrl;
 392
 393                l = __raw_readl(reg);
 394                if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
 395                        bank->toggle_mask |= 1 << gpio;
 396                if (trigger & IRQ_TYPE_EDGE_RISING)
 397                        l |= 1 << gpio;
 398                else if (trigger & IRQ_TYPE_EDGE_FALLING)
 399                        l &= ~(1 << gpio);
 400                else
 401                        return -EINVAL;
 402
 403                __raw_writel(l, reg);
 404        } else if (bank->regs->edgectrl1) {
 405                if (gpio & 0x08)
 406                        reg += bank->regs->edgectrl2;
 407                else
 408                        reg += bank->regs->edgectrl1;
 409
 410                gpio &= 0x07;
 411                l = __raw_readl(reg);
 412                l &= ~(3 << (gpio << 1));
 413                if (trigger & IRQ_TYPE_EDGE_RISING)
 414                        l |= 2 << (gpio << 1);
 415                if (trigger & IRQ_TYPE_EDGE_FALLING)
 416                        l |= 1 << (gpio << 1);
 417
 418                /* Enable wake-up during idle for dynamic tick */
 419                _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
 420                bank->context.wake_en =
 421                        __raw_readl(bank->base + bank->regs->wkup_en);
 422                __raw_writel(l, reg);
 423        }
 424        return 0;
 425}
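/*
 * Summary of the register mapping above (descriptive only, no new
 * behaviour): on banks that provide the detect registers,
 * IRQ_TYPE_LEVEL_LOW maps to leveldetect0, IRQ_TYPE_LEVEL_HIGH to
 * leveldetect1, IRQ_TYPE_EDGE_RISING to risingdetect and
 * IRQ_TYPE_EDGE_FALLING to fallingdetect. Older banks instead use the
 * single irqctrl bit (one edge at a time, handled via toggle_mask) or
 * the two-bit-per-line edgectrl1/edgectrl2 fields.
 */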
 426
 427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
 428{
 429        if (bank->regs->pinctrl) {
 430                void __iomem *reg = bank->base + bank->regs->pinctrl;
 431
 432                /* Claim the pin for MPU */
 433                __raw_writel(__raw_readl(reg) | (1 << offset), reg);
 434        }
 435
 436        if (bank->regs->ctrl && !BANK_USED(bank)) {
 437                void __iomem *reg = bank->base + bank->regs->ctrl;
 438                u32 ctrl;
 439
 440                ctrl = __raw_readl(reg);
 441                /* Module is enabled, clocks are not gated */
 442                ctrl &= ~GPIO_MOD_CTRL_BIT;
 443                __raw_writel(ctrl, reg);
 444                bank->context.ctrl = ctrl;
 445        }
 446}
 447
 448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
 449{
 450        void __iomem *base = bank->base;
 451
 452        if (bank->regs->wkup_en &&
 453            !LINE_USED(bank->mod_usage, offset) &&
 454            !LINE_USED(bank->irq_usage, offset)) {
 455                /* Disable wake-up during idle for dynamic tick */
 456                _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
 457                bank->context.wake_en =
 458                        __raw_readl(bank->base + bank->regs->wkup_en);
 459        }
 460
 461        if (bank->regs->ctrl && !BANK_USED(bank)) {
 462                void __iomem *reg = bank->base + bank->regs->ctrl;
 463                u32 ctrl;
 464
 465                ctrl = __raw_readl(reg);
 466                /* Module is disabled, clocks are gated */
 467                ctrl |= GPIO_MOD_CTRL_BIT;
 468                __raw_writel(ctrl, reg);
 469                bank->context.ctrl = ctrl;
 470        }
 471}
 472
 473static int gpio_is_input(struct gpio_bank *bank, int mask)
 474{
 475        void __iomem *reg = bank->base + bank->regs->direction;
 476
 477        return __raw_readl(reg) & mask;
 478}
 479
 480static int gpio_irq_type(struct irq_data *d, unsigned type)
 481{
 482        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 483        unsigned gpio = 0;
 484        int retval;
 485        unsigned long flags;
 486        unsigned offset;
 487
  488        if (type & ~IRQ_TYPE_SENSE_MASK)
  489                return -EINVAL;
  490
  491        if (!bank->regs->leveldetect0 &&
  492                (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
  493                return -EINVAL;
  494
  495        /* Get the runtime PM reference only after the checks above so
  496         * that an early -EINVAL return cannot leak it. */
  497        if (!BANK_USED(bank))
  498                pm_runtime_get_sync(bank->dev);
  499#ifdef CONFIG_ARCH_OMAP1
  500        if (d->irq > IH_MPUIO_BASE)
  501                gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
  502#endif
  503        if (!gpio)
  504                gpio = irq_to_gpio(bank, d->hwirq);
  505
 506        spin_lock_irqsave(&bank->lock, flags);
 507        offset = GPIO_INDEX(bank, gpio);
 508        retval = _set_gpio_triggering(bank, offset, type);
 509        if (!LINE_USED(bank->mod_usage, offset)) {
 510                _enable_gpio_module(bank, offset);
 511                _set_gpio_direction(bank, offset, 1);
 512        } else if (!gpio_is_input(bank, 1 << offset)) {
 513                spin_unlock_irqrestore(&bank->lock, flags);
 514                return -EINVAL;
 515        }
 516
 517        bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
 518        spin_unlock_irqrestore(&bank->lock, flags);
 519
 520        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 521                __irq_set_handler_locked(d->irq, handle_level_irq);
 522        else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
 523                __irq_set_handler_locked(d->irq, handle_edge_irq);
 524
 525        return retval;
 526}
 527
 528static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 529{
 530        void __iomem *reg = bank->base;
 531
 532        reg += bank->regs->irqstatus;
 533        __raw_writel(gpio_mask, reg);
 534
 535        /* Workaround for clearing DSP GPIO interrupts to allow retention */
 536        if (bank->regs->irqstatus2) {
 537                reg = bank->base + bank->regs->irqstatus2;
 538                __raw_writel(gpio_mask, reg);
 539        }
 540
 541        /* Flush posted write for the irq status to avoid spurious interrupts */
 542        __raw_readl(reg);
 543}
 544
 545static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
 546{
 547        _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 548}
 549
 550static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
 551{
 552        void __iomem *reg = bank->base;
 553        u32 l;
 554        u32 mask = (1 << bank->width) - 1;
 555
 556        reg += bank->regs->irqenable;
 557        l = __raw_readl(reg);
 558        if (bank->regs->irqenable_inv)
 559                l = ~l;
 560        l &= mask;
 561        return l;
 562}
 563
 564static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 565{
 566        void __iomem *reg = bank->base;
 567        u32 l;
 568
 569        if (bank->regs->set_irqenable) {
 570                reg += bank->regs->set_irqenable;
 571                l = gpio_mask;
 572                bank->context.irqenable1 |= gpio_mask;
 573        } else {
 574                reg += bank->regs->irqenable;
 575                l = __raw_readl(reg);
 576                if (bank->regs->irqenable_inv)
 577                        l &= ~gpio_mask;
 578                else
 579                        l |= gpio_mask;
 580                bank->context.irqenable1 = l;
 581        }
 582
 583        __raw_writel(l, reg);
 584}
 585
 586static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
 587{
 588        void __iomem *reg = bank->base;
 589        u32 l;
 590
 591        if (bank->regs->clr_irqenable) {
 592                reg += bank->regs->clr_irqenable;
 593                l = gpio_mask;
 594                bank->context.irqenable1 &= ~gpio_mask;
 595        } else {
 596                reg += bank->regs->irqenable;
 597                l = __raw_readl(reg);
 598                if (bank->regs->irqenable_inv)
 599                        l |= gpio_mask;
 600                else
 601                        l &= ~gpio_mask;
 602                bank->context.irqenable1 = l;
 603        }
 604
 605        __raw_writel(l, reg);
 606}
 607
 608static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
 609{
 610        if (enable)
 611                _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 612        else
 613                _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
 614}
 615
 616/*
  617 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
  618 * The 1510 does not seem to have a wake-up register. If JTAG is connected
  619 * to the target, the system will always wake up on GPIO events. While the
  620 * system is running, all registered GPIO interrupts need to have wake-up
  621 * enabled. When the system is suspended, only selected GPIO interrupts
  622 * need to have wake-up enabled.
 623 */
 624static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
 625{
 626        u32 gpio_bit = GPIO_BIT(bank, gpio);
 627        unsigned long flags;
 628
 629        if (bank->non_wakeup_gpios & gpio_bit) {
 630                dev_err(bank->dev,
 631                        "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
 632                return -EINVAL;
 633        }
 634
 635        spin_lock_irqsave(&bank->lock, flags);
 636        if (enable)
 637                bank->context.wake_en |= gpio_bit;
 638        else
 639                bank->context.wake_en &= ~gpio_bit;
 640
 641        __raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
 642        spin_unlock_irqrestore(&bank->lock, flags);
 643
 644        return 0;
 645}
 646
 647static void _reset_gpio(struct gpio_bank *bank, int gpio)
 648{
 649        _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
 650        _set_gpio_irqenable(bank, gpio, 0);
 651        _clear_gpio_irqstatus(bank, gpio);
 652        _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
 653        _clear_gpio_debounce(bank, gpio);
 654}
 655
 656/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
 657static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
 658{
 659        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 660        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 661
 662        return _set_gpio_wakeup(bank, gpio, enable);
 663}
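/*
 * Illustrative consumer-side sketch (not part of this driver) of the
 * enable_irq_wake()/disable_irq_wake() usage mentioned above; dev and
 * irq are hypothetical. Marking the IRQ as a wakeup source ends up here
 * in gpio_wake_enable() and therefore in _set_gpio_wakeup().
 *
 *	static int my_driver_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(irq);
 *		return 0;
 *	}
 *
 *	static int my_driver_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(irq);
 *		return 0;
 *	}
 */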
 664
 665static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
 666{
 667        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 668        unsigned long flags;
 669
 670        /*
 671         * If this is the first gpio_request for the bank,
 672         * enable the bank module.
 673         */
 674        if (!BANK_USED(bank))
 675                pm_runtime_get_sync(bank->dev);
 676
 677        spin_lock_irqsave(&bank->lock, flags);
 678        /* Set trigger to none. You need to enable the desired trigger with
 679         * request_irq() or set_irq_type(). Only do this if the IRQ line has
 680         * not already been requested.
 681         */
 682        if (!LINE_USED(bank->irq_usage, offset)) {
 683                _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
 684                _enable_gpio_module(bank, offset);
 685        }
 686        bank->mod_usage |= 1 << offset;
 687        spin_unlock_irqrestore(&bank->lock, flags);
 688
 689        return 0;
 690}
 691
 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 693{
 694        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 695        unsigned long flags;
 696
 697        spin_lock_irqsave(&bank->lock, flags);
 698        bank->mod_usage &= ~(1 << offset);
 699        _disable_gpio_module(bank, offset);
 700        _reset_gpio(bank, bank->chip.base + offset);
 701        spin_unlock_irqrestore(&bank->lock, flags);
 702
 703        /*
 704         * If this is the last gpio to be freed in the bank,
 705         * disable the bank module.
 706         */
 707        if (!BANK_USED(bank))
 708                pm_runtime_put(bank->dev);
 709}
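/*
 * Illustrative consumer-side sketch (not part of this driver): the
 * request/free pair above keeps the bank's module enabled and its
 * runtime PM reference held only while at least one line or IRQ in the
 * bank is in use. GPIO number and label are hypothetical.
 *
 *	ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "my-led");
 *	if (ret)
 *		return ret;
 *	gpio_set_value(gpio, 1);
 *	...
 *	gpio_free(gpio);
 */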
 710
 711/*
 712 * We need to unmask the GPIO bank interrupt as soon as possible to
 713 * avoid missing GPIO interrupts for other lines in the bank.
 714 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 715 * in the bank to avoid missing nested interrupts for a GPIO line.
 716 * If we wait to unmask individual GPIO lines in the bank after the
 717 * line's interrupt handler has been run, we may miss some nested
 718 * interrupts.
 719 */
 720static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 721{
 722        void __iomem *isr_reg = NULL;
 723        u32 isr;
 724        unsigned int bit;
 725        struct gpio_bank *bank;
 726        int unmasked = 0;
 727        struct irq_chip *chip = irq_desc_get_chip(desc);
 728
 729        chained_irq_enter(chip, desc);
 730
 731        bank = irq_get_handler_data(irq);
 732        isr_reg = bank->base + bank->regs->irqstatus;
 733        pm_runtime_get_sync(bank->dev);
 734
 735        if (WARN_ON(!isr_reg))
 736                goto exit;
 737
 738        while (1) {
 739                u32 isr_saved, level_mask = 0;
 740                u32 enabled;
 741
 742                enabled = _get_gpio_irqbank_mask(bank);
 743                isr_saved = isr = __raw_readl(isr_reg) & enabled;
 744
 745                if (bank->level_mask)
 746                        level_mask = bank->level_mask & enabled;
 747
  748                /* Clear edge-sensitive interrupts before the handler(s)
  749                 * are called so that we don't miss any interrupt that
  750                 * occurs while they are executing. */
 751                _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
 752                _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
 753                _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
 754
  755                /* If only edge-sensitive GPIO interrupts are configured,
  756                 * we can unmask the GPIO bank interrupt immediately. */
 757                if (!level_mask && !unmasked) {
 758                        unmasked = 1;
 759                        chained_irq_exit(chip, desc);
 760                }
 761
 762                if (!isr)
 763                        break;
 764
 765                while (isr) {
 766                        bit = __ffs(isr);
 767                        isr &= ~(1 << bit);
 768
 769                        /*
 770                         * Some chips can't respond to both rising and falling
 771                         * at the same time.  If this irq was requested with
 772                         * both flags, we need to flip the ICR data for the IRQ
 773                         * to respond to the IRQ for the opposite direction.
 774                         * This will be indicated in the bank toggle_mask.
 775                         */
 776                        if (bank->toggle_mask & (1 << bit))
 777                                _toggle_gpio_edge_triggering(bank, bit);
 778
 779                        generic_handle_irq(irq_find_mapping(bank->domain, bit));
 780                }
 781        }
  782        /* If the bank has any level-sensitive GPIO interrupt configured,
  783         * we must unmask the bank interrupt only after the handler(s)
  784         * have been executed, in order to avoid a spurious bank
  785         * interrupt. */
 786exit:
 787        if (!unmasked)
 788                chained_irq_exit(chip, desc);
 789        pm_runtime_put(bank->dev);
 790}
 791
 792static void gpio_irq_shutdown(struct irq_data *d)
 793{
 794        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 795        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 796        unsigned long flags;
 797        unsigned offset = GPIO_INDEX(bank, gpio);
 798
 799        spin_lock_irqsave(&bank->lock, flags);
 800        bank->irq_usage &= ~(1 << offset);
 801        _disable_gpio_module(bank, offset);
 802        _reset_gpio(bank, gpio);
 803        spin_unlock_irqrestore(&bank->lock, flags);
 804
 805        /*
 806         * If this is the last IRQ to be freed in the bank,
 807         * disable the bank module.
 808         */
 809        if (!BANK_USED(bank))
 810                pm_runtime_put(bank->dev);
 811}
 812
 813static void gpio_ack_irq(struct irq_data *d)
 814{
 815        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 816        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 817
 818        _clear_gpio_irqstatus(bank, gpio);
 819}
 820
 821static void gpio_mask_irq(struct irq_data *d)
 822{
 823        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 824        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 825        unsigned long flags;
 826
 827        spin_lock_irqsave(&bank->lock, flags);
 828        _set_gpio_irqenable(bank, gpio, 0);
 829        _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
 830        spin_unlock_irqrestore(&bank->lock, flags);
 831}
 832
 833static void gpio_unmask_irq(struct irq_data *d)
 834{
 835        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
 836        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
 837        unsigned int irq_mask = GPIO_BIT(bank, gpio);
 838        u32 trigger = irqd_get_trigger_type(d);
 839        unsigned long flags;
 840
 841        spin_lock_irqsave(&bank->lock, flags);
 842        if (trigger)
 843                _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
 844
 845        /* For level-triggered GPIOs, the clearing must be done after
 846         * the HW source is cleared, thus after the handler has run */
 847        if (bank->level_mask & irq_mask) {
 848                _set_gpio_irqenable(bank, gpio, 0);
 849                _clear_gpio_irqstatus(bank, gpio);
 850        }
 851
 852        _set_gpio_irqenable(bank, gpio, 1);
 853        spin_unlock_irqrestore(&bank->lock, flags);
 854}
 855
 856static struct irq_chip gpio_irq_chip = {
 857        .name           = "GPIO",
 858        .irq_shutdown   = gpio_irq_shutdown,
 859        .irq_ack        = gpio_ack_irq,
 860        .irq_mask       = gpio_mask_irq,
 861        .irq_unmask     = gpio_unmask_irq,
 862        .irq_set_type   = gpio_irq_type,
 863        .irq_set_wake   = gpio_wake_enable,
 864};
 865
 866/*---------------------------------------------------------------------*/
 867
 868static int omap_mpuio_suspend_noirq(struct device *dev)
 869{
 870        struct platform_device *pdev = to_platform_device(dev);
 871        struct gpio_bank        *bank = platform_get_drvdata(pdev);
 872        void __iomem            *mask_reg = bank->base +
 873                                        OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 874        unsigned long           flags;
 875
 876        spin_lock_irqsave(&bank->lock, flags);
 877        __raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
 878        spin_unlock_irqrestore(&bank->lock, flags);
 879
 880        return 0;
 881}
 882
 883static int omap_mpuio_resume_noirq(struct device *dev)
 884{
 885        struct platform_device *pdev = to_platform_device(dev);
 886        struct gpio_bank        *bank = platform_get_drvdata(pdev);
 887        void __iomem            *mask_reg = bank->base +
 888                                        OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 889        unsigned long           flags;
 890
 891        spin_lock_irqsave(&bank->lock, flags);
 892        __raw_writel(bank->context.wake_en, mask_reg);
 893        spin_unlock_irqrestore(&bank->lock, flags);
 894
 895        return 0;
 896}
 897
 898static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
 899        .suspend_noirq = omap_mpuio_suspend_noirq,
 900        .resume_noirq = omap_mpuio_resume_noirq,
 901};
 902
  903/* Use a platform_driver so the MPUIO bank gets the dev_pm_ops above. */
 904static struct platform_driver omap_mpuio_driver = {
 905        .driver         = {
 906                .name   = "mpuio",
 907                .pm     = &omap_mpuio_dev_pm_ops,
 908        },
 909};
 910
 911static struct platform_device omap_mpuio_device = {
 912        .name           = "mpuio",
 913        .id             = -1,
 914        .dev = {
 915                .driver = &omap_mpuio_driver.driver,
 916        }
 917        /* could list the /proc/iomem resources */
 918};
 919
 920static inline void mpuio_init(struct gpio_bank *bank)
 921{
 922        platform_set_drvdata(&omap_mpuio_device, bank);
 923
 924        if (platform_driver_register(&omap_mpuio_driver) == 0)
 925                (void) platform_device_register(&omap_mpuio_device);
 926}
 927
 928/*---------------------------------------------------------------------*/
 929
 930static int gpio_input(struct gpio_chip *chip, unsigned offset)
 931{
 932        struct gpio_bank *bank;
 933        unsigned long flags;
 934
 935        bank = container_of(chip, struct gpio_bank, chip);
 936        spin_lock_irqsave(&bank->lock, flags);
 937        _set_gpio_direction(bank, offset, 1);
 938        spin_unlock_irqrestore(&bank->lock, flags);
 939        return 0;
 940}
 941
 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
 943{
 944        struct gpio_bank *bank;
 945        u32 mask;
 946
 947        bank = container_of(chip, struct gpio_bank, chip);
 948        mask = (1 << offset);
 949
 950        if (gpio_is_input(bank, mask))
 951                return _get_gpio_datain(bank, offset);
 952        else
 953                return _get_gpio_dataout(bank, offset);
 954}
 955
 956static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
 957{
 958        struct gpio_bank *bank;
 959        unsigned long flags;
 960        int retval = 0;
 961
 962        bank = container_of(chip, struct gpio_bank, chip);
 963        spin_lock_irqsave(&bank->lock, flags);
 964
 965        if (LINE_USED(bank->irq_usage, offset)) {
 966                        retval = -EINVAL;
 967                        goto exit;
 968        }
 969
 970        bank->set_dataout(bank, offset, value);
 971        _set_gpio_direction(bank, offset, 0);
 972
 973exit:
 974        spin_unlock_irqrestore(&bank->lock, flags);
 975        return retval;
 976}
 977
 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
 979                unsigned debounce)
 980{
 981        struct gpio_bank *bank;
 982        unsigned long flags;
 983
 984        bank = container_of(chip, struct gpio_bank, chip);
 985
 986        spin_lock_irqsave(&bank->lock, flags);
 987        _set_gpio_debounce(bank, offset, debounce);
 988        spin_unlock_irqrestore(&bank->lock, flags);
 989
 990        return 0;
 991}
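/*
 * Illustrative consumer-side sketch (not part of this driver): the
 * ->set_debounce() hook above is reached through the generic gpiolib
 * call below. If the bank has no debounce clock (dbck_flag is false),
 * _set_gpio_debounce() silently ignores the request. The GPIO number
 * and the 200 microsecond value are hypothetical.
 *
 *	ret = gpio_set_debounce(gpio, 200);
 */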
 992
 993static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 994{
 995        struct gpio_bank *bank;
 996        unsigned long flags;
 997
 998        bank = container_of(chip, struct gpio_bank, chip);
 999        spin_lock_irqsave(&bank->lock, flags);
1000        bank->set_dataout(bank, offset, value);
1001        spin_unlock_irqrestore(&bank->lock, flags);
1002}
1003
1004/*---------------------------------------------------------------------*/
1005
1006static void __init omap_gpio_show_rev(struct gpio_bank *bank)
1007{
1008        static bool called;
1009        u32 rev;
1010
1011        if (called || bank->regs->revision == USHRT_MAX)
1012                return;
1013
1014        rev = __raw_readw(bank->base + bank->regs->revision);
1015        pr_info("OMAP GPIO hardware version %d.%d\n",
1016                (rev >> 4) & 0x0f, rev & 0x0f);
1017
1018        called = true;
1019}
1020
1021/* This lock class tells lockdep that GPIO irqs are in a different
1022 * category than their parents, so it won't report false recursion.
1023 */
1024static struct lock_class_key gpio_lock_class;
1025
1026static void omap_gpio_mod_init(struct gpio_bank *bank)
1027{
1028        void __iomem *base = bank->base;
1029        u32 l = 0xffffffff;
1030
1031        if (bank->width == 16)
1032                l = 0xffff;
1033
1034        if (bank->is_mpuio) {
1035                __raw_writel(l, bank->base + bank->regs->irqenable);
1036                return;
1037        }
1038
1039        _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
1040        _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
1041        if (bank->regs->debounce_en)
1042                __raw_writel(0, base + bank->regs->debounce_en);
1043
1044        /* Save OE default value (0xffffffff) in the context */
1045        bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
 1046        /* Initialize interface clk ungated, module enabled */
1047        if (bank->regs->ctrl)
1048                __raw_writel(0, base + bank->regs->ctrl);
1049
1050        bank->dbck = clk_get(bank->dev, "dbclk");
1051        if (IS_ERR(bank->dbck))
1052                dev_err(bank->dev, "Could not get gpio dbck\n");
1053}
1054
1055static void
1056omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1057                    unsigned int num)
1058{
1059        struct irq_chip_generic *gc;
1060        struct irq_chip_type *ct;
1061
1062        gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1063                                    handle_simple_irq);
1064        if (!gc) {
1065                dev_err(bank->dev, "Memory alloc failed for gc\n");
1066                return;
1067        }
1068
1069        ct = gc->chip_types;
1070
1071        /* NOTE: No ack required, reading IRQ status clears it. */
1072        ct->chip.irq_mask = irq_gc_mask_set_bit;
1073        ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1074        ct->chip.irq_set_type = gpio_irq_type;
1075
1076        if (bank->regs->wkup_en)
1077                ct->chip.irq_set_wake = gpio_wake_enable;
1078
1079        ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1080        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1081                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1082}
1083
1084static void omap_gpio_chip_init(struct gpio_bank *bank)
1085{
1086        int j;
1087        static int gpio;
1088
1089        /*
1090         * REVISIT eventually switch from OMAP-specific gpio structs
1091         * over to the generic ones
1092         */
1093        bank->chip.request = omap_gpio_request;
1094        bank->chip.free = omap_gpio_free;
1095        bank->chip.direction_input = gpio_input;
1096        bank->chip.get = gpio_get;
1097        bank->chip.direction_output = gpio_output;
1098        bank->chip.set_debounce = gpio_debounce;
1099        bank->chip.set = gpio_set;
1100        bank->chip.to_irq = omap_gpio_to_irq;
1101        if (bank->is_mpuio) {
1102                bank->chip.label = "mpuio";
1103                if (bank->regs->wkup_en)
1104                        bank->chip.dev = &omap_mpuio_device.dev;
1105                bank->chip.base = OMAP_MPUIO(0);
1106        } else {
1107                bank->chip.label = "gpio";
1108                bank->chip.base = gpio;
1109                gpio += bank->width;
1110        }
1111        bank->chip.ngpio = bank->width;
1112
1113        gpiochip_add(&bank->chip);
1114
1115        for (j = 0; j < bank->width; j++) {
1116                int irq = irq_create_mapping(bank->domain, j);
1117                irq_set_lockdep_class(irq, &gpio_lock_class);
1118                irq_set_chip_data(irq, bank);
1119                if (bank->is_mpuio) {
1120                        omap_mpuio_alloc_gc(bank, irq, bank->width);
1121                } else {
1122                        irq_set_chip_and_handler(irq, &gpio_irq_chip,
1123                                                 handle_simple_irq);
1124                        set_irq_flags(irq, IRQF_VALID);
1125                }
1126        }
1127        irq_set_chained_handler(bank->irq, gpio_irq_handler);
1128        irq_set_handler_data(bank->irq, bank);
1129}
1130
1131static const struct of_device_id omap_gpio_match[];
1132
1133static int omap_gpio_probe(struct platform_device *pdev)
1134{
1135        struct device *dev = &pdev->dev;
1136        struct device_node *node = dev->of_node;
1137        const struct of_device_id *match;
1138        const struct omap_gpio_platform_data *pdata;
1139        struct resource *res;
1140        struct gpio_bank *bank;
1141#ifdef CONFIG_ARCH_OMAP1
1142        int irq_base;
1143#endif
1144
1145        match = of_match_device(of_match_ptr(omap_gpio_match), dev);
1146
1147        pdata = match ? match->data : dev_get_platdata(dev);
1148        if (!pdata)
1149                return -EINVAL;
1150
1151        bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
1152        if (!bank) {
1153                dev_err(dev, "Memory alloc failed\n");
1154                return -ENOMEM;
1155        }
1156
1157        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1158        if (unlikely(!res)) {
1159                dev_err(dev, "Invalid IRQ resource\n");
1160                return -ENODEV;
1161        }
1162
1163        bank->irq = res->start;
1164        bank->dev = dev;
1165        bank->dbck_flag = pdata->dbck_flag;
1166        bank->stride = pdata->bank_stride;
1167        bank->width = pdata->bank_width;
1168        bank->is_mpuio = pdata->is_mpuio;
1169        bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
1170        bank->regs = pdata->regs;
1171#ifdef CONFIG_OF_GPIO
1172        bank->chip.of_node = of_node_get(node);
1173#endif
1174        if (node) {
1175                if (!of_property_read_bool(node, "ti,gpio-always-on"))
1176                        bank->loses_context = true;
1177        } else {
1178                bank->loses_context = pdata->loses_context;
1179
1180                if (bank->loses_context)
1181                        bank->get_context_loss_count =
1182                                pdata->get_context_loss_count;
1183        }
1184
1185#ifdef CONFIG_ARCH_OMAP1
1186        /*
1187         * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
1188         * irq_alloc_descs() and irq_domain_add_legacy() and just use a
1189         * linear IRQ domain mapping for all OMAP platforms.
1190         */
1191        irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
1192        if (irq_base < 0) {
1193                dev_err(dev, "Couldn't allocate IRQ numbers\n");
1194                return -ENODEV;
1195        }
1196
1197        bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
1198                                             0, &irq_domain_simple_ops, NULL);
1199#else
1200        bank->domain = irq_domain_add_linear(node, bank->width,
1201                                             &irq_domain_simple_ops, NULL);
1202#endif
1203        if (!bank->domain) {
1204                dev_err(dev, "Couldn't register an IRQ domain\n");
1205                return -ENODEV;
1206        }
1207
1208        if (bank->regs->set_dataout && bank->regs->clr_dataout)
1209                bank->set_dataout = _set_gpio_dataout_reg;
1210        else
1211                bank->set_dataout = _set_gpio_dataout_mask;
1212
1213        spin_lock_init(&bank->lock);
1214
1215        /* Static mapping, never released */
1216        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1217        if (unlikely(!res)) {
1218                dev_err(dev, "Invalid mem resource\n");
1219                irq_domain_remove(bank->domain);
1220                return -ENODEV;
1221        }
1222
1223        if (!devm_request_mem_region(dev, res->start, resource_size(res),
1224                                     pdev->name)) {
1225                dev_err(dev, "Region already claimed\n");
1226                irq_domain_remove(bank->domain);
1227                return -EBUSY;
1228        }
1229
1230        bank->base = devm_ioremap(dev, res->start, resource_size(res));
1231        if (!bank->base) {
1232                dev_err(dev, "Could not ioremap\n");
1233                irq_domain_remove(bank->domain);
1234                return -ENOMEM;
1235        }
1236
1237        platform_set_drvdata(pdev, bank);
1238
1239        pm_runtime_enable(bank->dev);
1240        pm_runtime_irq_safe(bank->dev);
1241        pm_runtime_get_sync(bank->dev);
1242
1243        if (bank->is_mpuio)
1244                mpuio_init(bank);
1245
1246        omap_gpio_mod_init(bank);
1247        omap_gpio_chip_init(bank);
1248        omap_gpio_show_rev(bank);
1249
1250        pm_runtime_put(bank->dev);
1251
1252        list_add_tail(&bank->node, &omap_gpio_list);
1253
1254        return 0;
1255}
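/*
 * Illustrative sketch (not part of this driver) of the legacy, non-DT
 * probe path above: platform code registers an "omap_gpio" platform
 * device whose platform_data supplies the fields consumed by the probe.
 * All values and the registration helper used here are hypothetical
 * simplifications of what the OMAP platform code actually does.
 *
 *	static struct omap_gpio_platform_data my_bank_pdata = {
 *		.regs		= &my_bank_regs,
 *		.bank_width	= 32,
 *		.bank_stride	= 1,
 *		.dbck_flag	= true,
 *	};
 *
 *	platform_device_register_data(NULL, "omap_gpio", 0,
 *				      &my_bank_pdata, sizeof(my_bank_pdata));
 */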
1256
1257#ifdef CONFIG_ARCH_OMAP2PLUS
1258
1259#if defined(CONFIG_PM_RUNTIME)
1260static void omap_gpio_restore_context(struct gpio_bank *bank);
1261
1262static int omap_gpio_runtime_suspend(struct device *dev)
1263{
1264        struct platform_device *pdev = to_platform_device(dev);
1265        struct gpio_bank *bank = platform_get_drvdata(pdev);
1266        u32 l1 = 0, l2 = 0;
1267        unsigned long flags;
1268        u32 wake_low, wake_hi;
1269
1270        spin_lock_irqsave(&bank->lock, flags);
1271
1272        /*
1273         * Only edges can generate a wakeup event to the PRCM.
1274         *
1275         * Therefore, ensure any wake-up capable GPIOs have
1276         * edge-detection enabled before going idle to ensure a wakeup
1277         * to the PRCM is generated on a GPIO transition. (c.f. 34xx
1278         * NDA TRM 25.5.3.1)
1279         *
1280         * The normal values will be restored upon ->runtime_resume()
1281         * by writing back the values saved in bank->context.
1282         */
1283        wake_low = bank->context.leveldetect0 & bank->context.wake_en;
1284        if (wake_low)
1285                __raw_writel(wake_low | bank->context.fallingdetect,
1286                             bank->base + bank->regs->fallingdetect);
1287        wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
1288        if (wake_hi)
1289                __raw_writel(wake_hi | bank->context.risingdetect,
1290                             bank->base + bank->regs->risingdetect);
1291
1292        if (!bank->enabled_non_wakeup_gpios)
1293                goto update_gpio_context_count;
1294
1295        if (bank->power_mode != OFF_MODE) {
1296                bank->power_mode = 0;
1297                goto update_gpio_context_count;
1298        }
1299        /*
1300         * If going to OFF, remove triggering for all
1301         * non-wakeup GPIOs.  Otherwise spurious IRQs will be
1302         * generated.  See OMAP2420 Errata item 1.101.
1303         */
1304        bank->saved_datain = __raw_readl(bank->base +
1305                                                bank->regs->datain);
1306        l1 = bank->context.fallingdetect;
1307        l2 = bank->context.risingdetect;
1308
1309        l1 &= ~bank->enabled_non_wakeup_gpios;
1310        l2 &= ~bank->enabled_non_wakeup_gpios;
1311
1312        __raw_writel(l1, bank->base + bank->regs->fallingdetect);
1313        __raw_writel(l2, bank->base + bank->regs->risingdetect);
1314
1315        bank->workaround_enabled = true;
1316
1317update_gpio_context_count:
1318        if (bank->get_context_loss_count)
1319                bank->context_loss_count =
1320                                bank->get_context_loss_count(bank->dev);
1321
1322        _gpio_dbck_disable(bank);
1323        spin_unlock_irqrestore(&bank->lock, flags);
1324
1325        return 0;
1326}
1327
1328static void omap_gpio_init_context(struct gpio_bank *p);
1329
1330static int omap_gpio_runtime_resume(struct device *dev)
1331{
1332        struct platform_device *pdev = to_platform_device(dev);
1333        struct gpio_bank *bank = platform_get_drvdata(pdev);
1334        u32 l = 0, gen, gen0, gen1;
1335        unsigned long flags;
1336        int c;
1337
1338        spin_lock_irqsave(&bank->lock, flags);
1339
1340        /*
1341         * On the first resume during the probe, the context has not
1342         * been initialised and so initialise it now. Also initialise
1343         * the context loss count.
1344         */
1345        if (bank->loses_context && !bank->context_valid) {
1346                omap_gpio_init_context(bank);
1347
1348                if (bank->get_context_loss_count)
1349                        bank->context_loss_count =
1350                                bank->get_context_loss_count(bank->dev);
1351        }
1352
1353        _gpio_dbck_enable(bank);
1354
1355        /*
1356         * In ->runtime_suspend(), level-triggered, wakeup-enabled
1357         * GPIOs were set to edge trigger also in order to be able to
1358         * generate a PRCM wakeup.  Here we restore the
1359         * pre-runtime_suspend() values for edge triggering.
1360         */
1361        __raw_writel(bank->context.fallingdetect,
1362                     bank->base + bank->regs->fallingdetect);
1363        __raw_writel(bank->context.risingdetect,
1364                     bank->base + bank->regs->risingdetect);
1365
1366        if (bank->loses_context) {
1367                if (!bank->get_context_loss_count) {
1368                        omap_gpio_restore_context(bank);
1369                } else {
1370                        c = bank->get_context_loss_count(bank->dev);
1371                        if (c != bank->context_loss_count) {
1372                                omap_gpio_restore_context(bank);
1373                        } else {
1374                                spin_unlock_irqrestore(&bank->lock, flags);
1375                                return 0;
1376                        }
1377                }
1378        }
1379
1380        if (!bank->workaround_enabled) {
1381                spin_unlock_irqrestore(&bank->lock, flags);
1382                return 0;
1383        }
1384
1385        l = __raw_readl(bank->base + bank->regs->datain);
1386
1387        /*
1388         * Check if any of the non-wakeup interrupt GPIOs have changed
1389         * state.  If so, generate an IRQ by software.  This is
1390         * horribly racy, but it's the best we can do to work around
1391         * this silicon bug.
1392         */
1393        l ^= bank->saved_datain;
1394        l &= bank->enabled_non_wakeup_gpios;
1395
1396        /*
1397         * No need to generate IRQs for the rising edge for gpio IRQs
1398         * configured with falling edge only; and vice versa.
1399         */
1400        gen0 = l & bank->context.fallingdetect;
1401        gen0 &= bank->saved_datain;
1402
1403        gen1 = l & bank->context.risingdetect;
1404        gen1 &= ~(bank->saved_datain);
1405
1406        /* FIXME: Consider GPIO IRQs with level detections properly! */
1407        gen = l & (~(bank->context.fallingdetect) &
1408                                         ~(bank->context.risingdetect));
1409        /* Consider all GPIO IRQs needed to be updated */
1410        gen |= gen0 | gen1;
1411
1412        if (gen) {
1413                u32 old0, old1;
1414
1415                old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
1416                old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
1417
1418                if (!bank->regs->irqstatus_raw0) {
1419                        __raw_writel(old0 | gen, bank->base +
1420                                                bank->regs->leveldetect0);
1421                        __raw_writel(old1 | gen, bank->base +
1422                                                bank->regs->leveldetect1);
1423                }
1424
1425                if (bank->regs->irqstatus_raw0) {
1426                        __raw_writel(old0 | l, bank->base +
1427                                                bank->regs->leveldetect0);
1428                        __raw_writel(old1 | l, bank->base +
1429                                                bank->regs->leveldetect1);
1430                }
1431                __raw_writel(old0, bank->base + bank->regs->leveldetect0);
1432                __raw_writel(old1, bank->base + bank->regs->leveldetect1);
1433        }
1434
1435        bank->workaround_enabled = false;
1436        spin_unlock_irqrestore(&bank->lock, flags);
1437
1438        return 0;
1439}
1440#endif /* CONFIG_PM_RUNTIME */
1441
1442void omap2_gpio_prepare_for_idle(int pwr_mode)
1443{
1444        struct gpio_bank *bank;
1445
1446        list_for_each_entry(bank, &omap_gpio_list, node) {
1447                if (!BANK_USED(bank) || !bank->loses_context)
1448                        continue;
1449
1450                bank->power_mode = pwr_mode;
1451
1452                pm_runtime_put_sync_suspend(bank->dev);
1453        }
1454}
1455
1456void omap2_gpio_resume_after_idle(void)
1457{
1458        struct gpio_bank *bank;
1459
1460        list_for_each_entry(bank, &omap_gpio_list, node) {
1461                if (!BANK_USED(bank) || !bank->loses_context)
1462                        continue;
1463
1464                pm_runtime_get_sync(bank->dev);
1465        }
1466}
1467
1468#if defined(CONFIG_PM_RUNTIME)
1469static void omap_gpio_init_context(struct gpio_bank *p)
1470{
1471        struct omap_gpio_reg_offs *regs = p->regs;
1472        void __iomem *base = p->base;
1473
1474        p->context.ctrl         = __raw_readl(base + regs->ctrl);
1475        p->context.oe           = __raw_readl(base + regs->direction);
1476        p->context.wake_en      = __raw_readl(base + regs->wkup_en);
1477        p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
1478        p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
1479        p->context.risingdetect = __raw_readl(base + regs->risingdetect);
1480        p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
1481        p->context.irqenable1   = __raw_readl(base + regs->irqenable);
1482        p->context.irqenable2   = __raw_readl(base + regs->irqenable2);
1483
1484        if (regs->set_dataout && p->regs->clr_dataout)
1485                p->context.dataout = __raw_readl(base + regs->set_dataout);
1486        else
1487                p->context.dataout = __raw_readl(base + regs->dataout);
1488
1489        p->context_valid = true;
1490}
1491
1492static void omap_gpio_restore_context(struct gpio_bank *bank)
1493{
1494        __raw_writel(bank->context.wake_en,
1495                                bank->base + bank->regs->wkup_en);
1496        __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
1497        __raw_writel(bank->context.leveldetect0,
1498                                bank->base + bank->regs->leveldetect0);
1499        __raw_writel(bank->context.leveldetect1,
1500                                bank->base + bank->regs->leveldetect1);
1501        __raw_writel(bank->context.risingdetect,
1502                                bank->base + bank->regs->risingdetect);
1503        __raw_writel(bank->context.fallingdetect,
1504                                bank->base + bank->regs->fallingdetect);
1505        if (bank->regs->set_dataout && bank->regs->clr_dataout)
1506                __raw_writel(bank->context.dataout,
1507                                bank->base + bank->regs->set_dataout);
1508        else
1509                __raw_writel(bank->context.dataout,
1510                                bank->base + bank->regs->dataout);
1511        __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
1512
1513        if (bank->dbck_enable_mask) {
1514                __raw_writel(bank->context.debounce, bank->base +
1515                                        bank->regs->debounce);
1516                __raw_writel(bank->context.debounce_en,
1517                                        bank->base + bank->regs->debounce_en);
1518        }
1519
1520        __raw_writel(bank->context.irqenable1,
1521                                bank->base + bank->regs->irqenable);
1522        __raw_writel(bank->context.irqenable2,
1523                                bank->base + bank->regs->irqenable2);
1524}
1525#endif /* CONFIG_PM_RUNTIME */
1526#else
1527#define omap_gpio_runtime_suspend NULL
1528#define omap_gpio_runtime_resume NULL
1529static inline void omap_gpio_init_context(struct gpio_bank *p) {}
1530#endif
1531
1532static const struct dev_pm_ops gpio_pm_ops = {
1533        SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
1534                                                                        NULL)
1535};
1536
1537#if defined(CONFIG_OF)
1538static struct omap_gpio_reg_offs omap2_gpio_regs = {
1539        .revision =             OMAP24XX_GPIO_REVISION,
1540        .direction =            OMAP24XX_GPIO_OE,
1541        .datain =               OMAP24XX_GPIO_DATAIN,
1542        .dataout =              OMAP24XX_GPIO_DATAOUT,
1543        .set_dataout =          OMAP24XX_GPIO_SETDATAOUT,
1544        .clr_dataout =          OMAP24XX_GPIO_CLEARDATAOUT,
1545        .irqstatus =            OMAP24XX_GPIO_IRQSTATUS1,
1546        .irqstatus2 =           OMAP24XX_GPIO_IRQSTATUS2,
1547        .irqenable =            OMAP24XX_GPIO_IRQENABLE1,
1548        .irqenable2 =           OMAP24XX_GPIO_IRQENABLE2,
1549        .set_irqenable =        OMAP24XX_GPIO_SETIRQENABLE1,
1550        .clr_irqenable =        OMAP24XX_GPIO_CLEARIRQENABLE1,
1551        .debounce =             OMAP24XX_GPIO_DEBOUNCE_VAL,
1552        .debounce_en =          OMAP24XX_GPIO_DEBOUNCE_EN,
1553        .ctrl =                 OMAP24XX_GPIO_CTRL,
1554        .wkup_en =              OMAP24XX_GPIO_WAKE_EN,
1555        .leveldetect0 =         OMAP24XX_GPIO_LEVELDETECT0,
1556        .leveldetect1 =         OMAP24XX_GPIO_LEVELDETECT1,
1557        .risingdetect =         OMAP24XX_GPIO_RISINGDETECT,
1558        .fallingdetect =        OMAP24XX_GPIO_FALLINGDETECT,
1559};
1560
1561static struct omap_gpio_reg_offs omap4_gpio_regs = {
1562        .revision =             OMAP4_GPIO_REVISION,
1563        .direction =            OMAP4_GPIO_OE,
1564        .datain =               OMAP4_GPIO_DATAIN,
1565        .dataout =              OMAP4_GPIO_DATAOUT,
1566        .set_dataout =          OMAP4_GPIO_SETDATAOUT,
1567        .clr_dataout =          OMAP4_GPIO_CLEARDATAOUT,
1568        .irqstatus =            OMAP4_GPIO_IRQSTATUS0,
1569        .irqstatus2 =           OMAP4_GPIO_IRQSTATUS1,
1570        .irqenable =            OMAP4_GPIO_IRQSTATUSSET0,
1571        .irqenable2 =           OMAP4_GPIO_IRQSTATUSSET1,
1572        .set_irqenable =        OMAP4_GPIO_IRQSTATUSSET0,
1573        .clr_irqenable =        OMAP4_GPIO_IRQSTATUSCLR0,
1574        .debounce =             OMAP4_GPIO_DEBOUNCINGTIME,
1575        .debounce_en =          OMAP4_GPIO_DEBOUNCENABLE,
1576        .ctrl =                 OMAP4_GPIO_CTRL,
1577        .wkup_en =              OMAP4_GPIO_IRQWAKEN0,
1578        .leveldetect0 =         OMAP4_GPIO_LEVELDETECT0,
1579        .leveldetect1 =         OMAP4_GPIO_LEVELDETECT1,
1580        .risingdetect =         OMAP4_GPIO_RISINGDETECT,
1581        .fallingdetect =        OMAP4_GPIO_FALLINGDETECT,
1582};
1583
1584static const struct omap_gpio_platform_data omap2_pdata = {
1585        .regs = &omap2_gpio_regs,
1586        .bank_width = 32,
1587        .dbck_flag = false,
1588};
1589
1590static const struct omap_gpio_platform_data omap3_pdata = {
1591        .regs = &omap2_gpio_regs,
1592        .bank_width = 32,
1593        .dbck_flag = true,
1594};
1595
1596static const struct omap_gpio_platform_data omap4_pdata = {
1597        .regs = &omap4_gpio_regs,
1598        .bank_width = 32,
1599        .dbck_flag = true,
1600};
1601
1602static const struct of_device_id omap_gpio_match[] = {
1603        {
1604                .compatible = "ti,omap4-gpio",
1605                .data = &omap4_pdata,
1606        },
1607        {
1608                .compatible = "ti,omap3-gpio",
1609                .data = &omap3_pdata,
1610        },
1611        {
1612                .compatible = "ti,omap2-gpio",
1613                .data = &omap2_pdata,
1614        },
1615        { },
1616};
1617MODULE_DEVICE_TABLE(of, omap_gpio_match);
1618#endif
1619
1620static struct platform_driver omap_gpio_driver = {
1621        .probe          = omap_gpio_probe,
1622        .driver         = {
1623                .name   = "omap_gpio",
1624                .pm     = &gpio_pm_ops,
1625                .of_match_table = of_match_ptr(omap_gpio_match),
1626        },
1627};
1628
1629/*
 1630 * The gpio driver must be registered before the
 1631 * machine_init functions access the gpio APIs.
 1632 * Hence omap_gpio_drv_reg() is a postcore_initcall.
1633 */
1634static int __init omap_gpio_drv_reg(void)
1635{
1636        return platform_driver_register(&omap_gpio_driver);
1637}
1638postcore_initcall(omap_gpio_drv_reg);
1639