linux/drivers/media/rc/nuvoton-cir.c
   1/*
   2 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
   3 *
   4 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
   5 * Copyright (C) 2009 Nuvoton PS Team
   6 *
   7 * Special thanks to Nuvoton for providing hardware, spec sheets and
   8 * sample code upon which portions of this driver are based. Indirect
   9 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
  10 * modeled after.
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License as
  14 * published by the Free Software Foundation; either version 2 of the
  15 * License, or (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful, but
  18 * WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 * General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  25 * USA
  26 */
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#include <linux/kernel.h>
  31#include <linux/module.h>
  32#include <linux/pnp.h>
  33#include <linux/io.h>
  34#include <linux/interrupt.h>
  35#include <linux/sched.h>
  36#include <linux/slab.h>
  37#include <media/rc-core.h>
  38#include <linux/pci_ids.h>
  39
  40#include "nuvoton-cir.h"
  41
  42/* write val to config reg */
  43static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
  44{
  45        outb(reg, nvt->cr_efir);
  46        outb(val, nvt->cr_efdr);
  47}
  48
  49/* read val from config reg */
  50static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
  51{
  52        outb(reg, nvt->cr_efir);
  53        return inb(nvt->cr_efdr);
  54}
  55
  56/* update config register bit without changing other bits */
  57static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  58{
  59        u8 tmp = nvt_cr_read(nvt, reg) | val;
  60        nvt_cr_write(nvt, tmp, reg);
  61}
  62
  63/* clear config register bit without changing other bits */
  64static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  65{
  66        u8 tmp = nvt_cr_read(nvt, reg) & ~val;
  67        nvt_cr_write(nvt, tmp, reg);
  68}
  69
  70/* enter extended function mode */
  71static inline void nvt_efm_enable(struct nvt_dev *nvt)
  72{
  73        /* Enabling Extended Function Mode explicitly requires writing 2x */
  74        outb(EFER_EFM_ENABLE, nvt->cr_efir);
  75        outb(EFER_EFM_ENABLE, nvt->cr_efir);
  76}
  77
  78/* exit extended function mode */
  79static inline void nvt_efm_disable(struct nvt_dev *nvt)
  80{
  81        outb(EFER_EFM_DISABLE, nvt->cr_efir);
  82}
  83
  84/*
  85 * When you want to address a specific logical device, write its logical
  86 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
  87 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
  88 */
  89static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
  90{
  91        outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
  92        outb(ldev, nvt->cr_efdr);
  93}
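    /*
     * Usage sketch (mirrors what nvt_cir_ldev_init() below actually does):
     * select the logical device, then write its enable bit:
     *
     *   nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
     *   nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
     */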
  94
  95/* write val to cir config register */
  96static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
  97{
  98        outb(val, nvt->cir_addr + offset);
  99}
 100
 101/* read val from cir config register */
 102static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
 103{
 104        u8 val;
 105
 106        val = inb(nvt->cir_addr + offset);
 107
 108        return val;
 109}
 110
 111/* write val to cir wake register */
 112static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
 113                                          u8 val, u8 offset)
 114{
 115        outb(val, nvt->cir_wake_addr + offset);
 116}
 117
 118/* read val from cir wake config register */
 119static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
 120{
 121        u8 val;
 122
 123        val = inb(nvt->cir_wake_addr + offset);
 124
 125        return val;
 126}
 127
 128/* dump current cir register contents */
 129static void cir_dump_regs(struct nvt_dev *nvt)
 130{
 131        nvt_efm_enable(nvt);
 132        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 133
 134        pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
 135        pr_info(" * CR CIR ACTIVE :   0x%x\n",
 136                nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
 137        pr_info(" * CR CIR BASE ADDR: 0x%x\n",
 138                (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
 139                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
 140        pr_info(" * CR CIR IRQ NUM:   0x%x\n",
 141                nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
 142
 143        nvt_efm_disable(nvt);
 144
 145        pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
 146        pr_info(" * IRCON:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
 147        pr_info(" * IRSTS:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
 148        pr_info(" * IREN:      0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
 149        pr_info(" * RXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
 150        pr_info(" * CP:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
 151        pr_info(" * CC:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
 152        pr_info(" * SLCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
 153        pr_info(" * SLCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
 154        pr_info(" * FIFOCON:   0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
 155        pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
 156        pr_info(" * SRXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
 157        pr_info(" * TXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
 158        pr_info(" * STXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
 159        pr_info(" * FCCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
 160        pr_info(" * FCCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
 161        pr_info(" * IRFSM:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
 162}
 163
 164/* dump current cir wake register contents */
 165static void cir_wake_dump_regs(struct nvt_dev *nvt)
 166{
 167        u8 i, fifo_len;
 168
 169        nvt_efm_enable(nvt);
 170        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 171
 172        pr_info("%s: Dump CIR WAKE logical device registers:\n",
 173                NVT_DRIVER_NAME);
 174        pr_info(" * CR CIR WAKE ACTIVE :   0x%x\n",
 175                nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
 176        pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
 177                (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
 178                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
 179        pr_info(" * CR CIR WAKE IRQ NUM:   0x%x\n",
 180                nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
 181
 182        nvt_efm_disable(nvt);
 183
 184        pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
 185        pr_info(" * IRCON:          0x%x\n",
 186                nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
 187        pr_info(" * IRSTS:          0x%x\n",
 188                nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
 189        pr_info(" * IREN:           0x%x\n",
 190                nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
 191        pr_info(" * FIFO CMP DEEP:  0x%x\n",
 192                nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
 193        pr_info(" * FIFO CMP TOL:   0x%x\n",
 194                nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
 195        pr_info(" * FIFO COUNT:     0x%x\n",
 196                nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
 197        pr_info(" * SLCH:           0x%x\n",
 198                nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
 199        pr_info(" * SLCL:           0x%x\n",
 200                nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
 201        pr_info(" * FIFOCON:        0x%x\n",
 202                nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
 203        pr_info(" * SRXFSTS:        0x%x\n",
 204                nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
 205        pr_info(" * SAMPLE RX FIFO: 0x%x\n",
 206                nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
 207        pr_info(" * WR FIFO DATA:   0x%x\n",
 208                nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
 209        pr_info(" * RD FIFO ONLY:   0x%x\n",
 210                nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
 211        pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
 212                nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
 213        pr_info(" * FIFO IGNORE:    0x%x\n",
 214                nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
 215        pr_info(" * IRFSM:          0x%x\n",
 216                nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
 217
 218        fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
 219        pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
 220        pr_info("* Contents =");
 221        for (i = 0; i < fifo_len; i++)
 222                pr_cont(" %02x",
 223                        nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
 224        pr_cont("\n");
 225}
 226
 227/* detect hardware features */
 228static int nvt_hw_detect(struct nvt_dev *nvt)
 229{
 230        unsigned long flags;
 231        u8 chip_major, chip_minor;
 232        char chip_id[12];
 233        bool chip_unknown = false;
 234
 235        nvt_efm_enable(nvt);
 236
 237        /* Check if we're wired for the alternate EFER setup */
 238        chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
 239        if (chip_major == 0xff) {
 240                nvt->cr_efir = CR_EFIR2;
 241                nvt->cr_efdr = CR_EFDR2;
 242                nvt_efm_enable(nvt);
 243                chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
 244        }
 245
 246        chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
 247
 248        /* these are the known working chip revisions... */
 249        switch (chip_major) {
 250        case CHIP_ID_HIGH_667:
 251                strcpy(chip_id, "w83667hg\0");
 252                if (chip_minor != CHIP_ID_LOW_667)
 253                        chip_unknown = true;
 254                break;
 255        case CHIP_ID_HIGH_677B:
 256                strcpy(chip_id, "w83677hg\0");
 257                if (chip_minor != CHIP_ID_LOW_677B2 &&
 258                    chip_minor != CHIP_ID_LOW_677B3)
 259                        chip_unknown = true;
 260                break;
 261        case CHIP_ID_HIGH_677C:
 262                strcpy(chip_id, "w83677hg-c\0");
 263                if (chip_minor != CHIP_ID_LOW_677C)
 264                        chip_unknown = true;
 265                break;
 266        default:
 267                strcpy(chip_id, "w836x7hg\0");
 268                chip_unknown = true;
 269                break;
 270        }
 271
 272        /* warn, but still let the driver load, if we don't know this chip */
 273        if (chip_unknown)
 274                nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, "
 275                       "it may not work...", chip_id, chip_major, chip_minor);
 276        else
 277                nvt_dbg("%s: chip id: 0x%02x 0x%02x",
 278                        chip_id, chip_major, chip_minor);
 279
 280        nvt_efm_disable(nvt);
 281
 282        spin_lock_irqsave(&nvt->nvt_lock, flags);
 283        nvt->chip_major = chip_major;
 284        nvt->chip_minor = chip_minor;
 285        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 286
 287        return 0;
 288}
 289
 290static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 291{
 292        u8 val, psreg, psmask, psval;
 293
 294        if (nvt->chip_major == CHIP_ID_HIGH_667) {
 295                psreg = CR_MULTIFUNC_PIN_SEL;
 296                psmask = MULTIFUNC_PIN_SEL_MASK;
 297                psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
 298        } else {
 299                psreg = CR_OUTPUT_PIN_SEL;
 300                psmask = OUTPUT_PIN_SEL_MASK;
 301                psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
 302        }
 303
 304        /* output pin selection: enable CIR, with WB sensor enabled */
 305        val = nvt_cr_read(nvt, psreg);
 306        val &= psmask;
 307        val |= psval;
 308        nvt_cr_write(nvt, val, psreg);
 309
 310        /* Select CIR logical device and enable */
 311        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 312        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 313
 314        nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
 315        nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
 316
 317        nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
 318
 319        nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
 320                nvt->cir_addr, nvt->cir_irq);
 321}
 322
 323static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 324{
 325        /* Select ACPI logical device, enable it and CIR Wake */
 326        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
 327        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 328
 329        /* Enable CIR Wake via PSOUT# (Pin60) */
 330        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
 331
 332        /* enable the PME interrupt for CIR wakeup events */
 333        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 334
 335        /* Select CIR Wake logical device and enable */
 336        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 337        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 338
 339        nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
 340        nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
 341
 342        nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
 343
 344        nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
 345                nvt->cir_wake_addr, nvt->cir_wake_irq);
 346}
 347
 348/* clear out the hardware's cir rx fifo */
 349static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
 350{
 351        u8 val;
 352
 353        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
 354        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
 355}
 356
 357/* clear out the hardware's cir wake rx fifo */
 358static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
 359{
 360        u8 val;
 361
 362        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
 363        nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
 364                               CIR_WAKE_FIFOCON);
 365}
 366
 367/* clear out the hardware's cir tx fifo */
 368static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
 369{
 370        u8 val;
 371
 372        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
 373        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
 374}
 375
 376/* enable RX Trigger Level Reach and Packet End interrupts */
 377static void nvt_set_cir_iren(struct nvt_dev *nvt)
 378{
 379        u8 iren;
 380
 381        iren = CIR_IREN_RTR | CIR_IREN_PE;
 382        nvt_cir_reg_write(nvt, iren, CIR_IREN);
 383}
 384
 385static void nvt_cir_regs_init(struct nvt_dev *nvt)
 386{
 387        /* set sample limit count (PE interrupt raised when reached) */
 388        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
 389        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);
 390
 391        /* set fifo irq trigger levels */
 392        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
 393                          CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);
 394
 395        /*
 396         * Enable TX and RX, specify carrier on = low, off = high, and set
 397         * sample period (currently 50us)
 398         */
 399        nvt_cir_reg_write(nvt,
 400                          CIR_IRCON_TXEN | CIR_IRCON_RXEN |
 401                          CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
 402                          CIR_IRCON);
 403
 404        /* clear hardware rx and tx fifos */
 405        nvt_clear_cir_fifo(nvt);
 406        nvt_clear_tx_fifo(nvt);
 407
 408        /* clear any and all stray interrupts */
 409        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 410
 411        /* and finally, enable interrupts */
 412        nvt_set_cir_iren(nvt);
 413}
 414
 415static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 416{
 417        /* set number of bytes needed for wake from s3 (default 65) */
 418        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
 419                               CIR_WAKE_FIFO_CMP_DEEP);
 420
 421        /* set tolerance/variance allowed per byte during wake compare */
 422        nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
 423                               CIR_WAKE_FIFO_CMP_TOL);
 424
 425        /* set sample limit count (PE interrupt raised when reached) */
 426        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
 427        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);
 428
 429        /* set cir wake fifo rx trigger level (currently 67) */
 430        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
 431                               CIR_WAKE_FIFOCON);
 432
 433        /*
 434         * Enable TX and RX, specify carrier on = low, off = high, and set
 435         * sample period (currently 50us)
 436         */
 437        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
 438                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
 439                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
 440                               CIR_WAKE_IRCON);
 441
 442        /* clear cir wake rx fifo */
 443        nvt_clear_cir_wake_fifo(nvt);
 444
 445        /* clear any and all stray interrupts */
 446        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
 447}
 448
 449static void nvt_enable_wake(struct nvt_dev *nvt)
 450{
 451        nvt_efm_enable(nvt);
 452
 453        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
 454        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
 455        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 456
 457        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 458        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 459
 460        nvt_efm_disable(nvt);
 461
 462        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
 463                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
 464                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
 465                               CIR_WAKE_IRCON);
 466        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
 467        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
 468}
 469
 470#if 0 /* Currently unused */
 471/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
 472static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
 473{
 474        u32 count, carrier, duration = 0;
 475        int i;
 476
 477        count = nvt_cir_reg_read(nvt, CIR_FCCL) |
 478                nvt_cir_reg_read(nvt, CIR_FCCH) << 8;
 479
 480        for (i = 0; i < nvt->pkts; i++) {
 481                if (nvt->buf[i] & BUF_PULSE_BIT)
 482                        duration += nvt->buf[i] & BUF_LEN_MASK;
 483        }
 484
 485        duration *= SAMPLE_PERIOD;
 486
 487        if (!count || !duration) {
 488                nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
 489                       count, duration);
 490                return 0;
 491        }
 492
 493        carrier = MS_TO_NS(count) / duration;
 494
 495        if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
 496                nvt_dbg("WTF? Carrier frequency out of range!");
 497
 498        nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
 499                carrier, count, duration);
 500
 501        return carrier;
 502}
 503#endif
 504/*
 505 * set carrier frequency
 506 *
 507 * The carrier is set via two registers, CP & CC:
 508 * CP is always set to 0x81, and
 509 * CC is set per the spec as CC = 3MHz/carrier - 1
 510 */
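    /*
     * Worked example (illustrative carrier values, using the integer math in
     * nvt_set_tx_carrier() below): for a 38kHz carrier,
     * CC = 3000000 / 38000 - 1 = 77 (0x4d); for 36kHz, CC = 82 (0x52).
     */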
 511static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 512{
 513        struct nvt_dev *nvt = dev->priv;
 514        u16 val;
 515
 516        if (carrier == 0)
 517                return -EINVAL;
 518
 519        nvt_cir_reg_write(nvt, 1, CIR_CP);
 520        val = 3000000 / (carrier) - 1;
 521        nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
 522
 523        nvt_dbg("cp: 0x%x cc: 0x%x\n",
 524                nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
 525
 526        return 0;
 527}
 528
 529/*
 530 * nvt_tx_ir
 531 *
 532 * 1) clean TX fifo first (handled by AP)
 533 * 2) copy data from user space
 534 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 535 * 4) send 9 packets to TX FIFO to open TTR
 536 * in interrupt_handler:
 537 * 5) send all data out
 538 * go back to write():
 539 * 6) disable TX interrupts, re-enable RX interrupts
 540 *
 541 * The key problem this function deals with is that user space data may be
 542 * larger than the driver's data buffer. So nvt_tx_ir() copies at most
 543 * TX_BUF_LEN bytes into buf and tracks its position in cur_buf_num. The
 544 * buffer may also hold more than TXFCONT (0xff) entries, so the interrupt
 545 * handler keeps TXFCONT set to 0xff until buf_count is less than 0xff.
 546 */
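    /*
     * For illustration only (sizes here are hypothetical, not taken from
     * nuvoton-cir.h): if TX_BUF_LEN were 256 bytes and sizeof(unsigned) is 4,
     * one nvt_tx_ir() call copies at most 256 / 4 = 64 samples; any excess is
     * dropped and only the copied count is returned.
     */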
 547static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
 548{
 549        struct nvt_dev *nvt = dev->priv;
 550        unsigned long flags;
 551        unsigned int i;
 552        u8 iren;
 553        int ret;
 554
 555        spin_lock_irqsave(&nvt->tx.lock, flags);
 556
 557        ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
 558        nvt->tx.buf_count = (ret * sizeof(unsigned));
 559
 560        memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
 561
 562        nvt->tx.cur_buf_num = 0;
 563
 564        /* save currently enabled interrupts */
 565        iren = nvt_cir_reg_read(nvt, CIR_IREN);
 566
 567        /* now disable all interrupts except TFU & TTR */
 568        nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);
 569
 570        nvt->tx.tx_state = ST_TX_REPLY;
 571
 572        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
 573                          CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
 574
 575        /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
 576        for (i = 0; i < 9; i++)
 577                nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
 578
 579        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 580
 581        wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
 582
 583        spin_lock_irqsave(&nvt->tx.lock, flags);
 584        nvt->tx.tx_state = ST_TX_NONE;
 585        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 586
 587        /* restore enabled interrupts to prior state */
 588        nvt_cir_reg_write(nvt, iren, CIR_IREN);
 589
 590        return ret;
 591}
 592
 593/* dump contents of the last rx buffer we got from the hw rx fifo */
 594static void nvt_dump_rx_buf(struct nvt_dev *nvt)
 595{
 596        int i;
 597
 598        printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
 599        for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
 600                printk(KERN_CONT "0x%02x ", nvt->buf[i]);
 601        printk(KERN_CONT "\n");
 602}
 603
 604/*
 605 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 606 * trigger decode when appropriate.
 607 *
 608 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 609 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 610 * (default 50us) intervals for that pulse/space. A discrete signal is
 611 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 612 * to signal more IR coming (repeats) or end of IR, respectively. We store
 613 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 614 * or 0x80, at which time we trigger a decode operation.
 615 */
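    /*
     * Illustrative decode (sample values chosen here, not from a real trace):
     * 0x90 has the msb set, so it is a pulse of (0x90 & 0x7f) * 50us = 800us;
     * 0x28 has the msb clear, so it is a space of 0x28 * 50us = 2000us.
     */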
 616static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
 617{
 618        DEFINE_IR_RAW_EVENT(rawir);
 619        u8 sample;
 620        int i;
 621
 622        nvt_dbg_verbose("%s firing", __func__);
 623
 624        if (debug)
 625                nvt_dump_rx_buf(nvt);
 626
 627        nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
 628
 629        init_ir_raw_event(&rawir);
 630
 631        for (i = 0; i < nvt->pkts; i++) {
 632                sample = nvt->buf[i];
 633
 634                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
 635                rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
 636                                          * SAMPLE_PERIOD);
 637
 638                nvt_dbg("Storing %s with duration %d",
 639                        rawir.pulse ? "pulse" : "space", rawir.duration);
 640
 641                ir_raw_event_store_with_filter(nvt->rdev, &rawir);
 642
 643                /*
 644                 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
 645                 * indicates end of IR signal, but new data incoming. In both
 646                 * cases, it means we're ready to call ir_raw_event_handle
 647                 */
 648                if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
 649                        nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
 650                        ir_raw_event_handle(nvt->rdev);
 651                }
 652        }
 653
 654        nvt->pkts = 0;
 655
 656        nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
 657        ir_raw_event_handle(nvt->rdev);
 658
 659        nvt_dbg_verbose("%s done", __func__);
 660}
 661
 662static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
 663{
 664        nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");
 665
 666        nvt->pkts = 0;
 667        nvt_clear_cir_fifo(nvt);
 668        ir_raw_event_reset(nvt->rdev);
 669}
 670
 671/* copy data from hardware rx fifo into driver buffer */
 672static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 673{
 674        unsigned long flags;
 675        u8 fifocount, val;
 676        unsigned int b_idx;
 677        bool overrun = false;
 678        int i;
 679
 680        /* Get count of how many bytes to read from RX FIFO */
 681        fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
 682        /* if we get 0xff, probably means the logical dev is disabled */
 683        if (fifocount == 0xff)
 684                return;
 685        /* watch out for a fifo overrun condition */
 686        else if (fifocount > RX_BUF_LEN) {
 687                overrun = true;
 688                fifocount = RX_BUF_LEN;
 689        }
 690
 691        nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
 692
 693        spin_lock_irqsave(&nvt->nvt_lock, flags);
 694
 695        b_idx = nvt->pkts;
 696
 697        /* This should never happen, but let's check anyway... */
 698        if (b_idx + fifocount > RX_BUF_LEN) {
 699                nvt_process_rx_ir_data(nvt);
 700                b_idx = 0;
 701        }
 702
 703        /* Read fifocount bytes from CIR Sample RX FIFO register */
 704        for (i = 0; i < fifocount; i++) {
 705                val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
 706                nvt->buf[b_idx + i] = val;
 707        }
 708
 709        nvt->pkts += fifocount;
 710        nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
 711
 712        nvt_process_rx_ir_data(nvt);
 713
 714        if (overrun)
 715                nvt_handle_rx_fifo_overrun(nvt);
 716
 717        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 718}
 719
 720static void nvt_cir_log_irqs(u8 status, u8 iren)
 721{
 722        nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
 723                status, iren,
 724                status & CIR_IRSTS_RDR  ? " RDR"        : "",
 725                status & CIR_IRSTS_RTR  ? " RTR"        : "",
 726                status & CIR_IRSTS_PE   ? " PE"         : "",
 727                status & CIR_IRSTS_RFO  ? " RFO"        : "",
 728                status & CIR_IRSTS_TE   ? " TE"         : "",
 729                status & CIR_IRSTS_TTR  ? " TTR"        : "",
 730                status & CIR_IRSTS_TFU  ? " TFU"        : "",
 731                status & CIR_IRSTS_GH   ? " GH"         : "",
 732                status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
 733                           CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
 734                           CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
 735}
 736
 737static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
 738{
 739        unsigned long flags;
 740        bool tx_inactive;
 741        u8 tx_state;
 742
 743        spin_lock_irqsave(&nvt->tx.lock, flags);
 744        tx_state = nvt->tx.tx_state;
 745        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 746
 747        tx_inactive = (tx_state == ST_TX_NONE);
 748
 749        return tx_inactive;
 750}
 751
 752/* interrupt service routine for incoming and outgoing CIR data */
 753static irqreturn_t nvt_cir_isr(int irq, void *data)
 754{
 755        struct nvt_dev *nvt = data;
 756        u8 status, iren, cur_state;
 757        unsigned long flags;
 758
 759        nvt_dbg_verbose("%s firing", __func__);
 760
 761        nvt_efm_enable(nvt);
 762        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 763        nvt_efm_disable(nvt);
 764
 765        /*
 766         * Get IR Status register contents. Write 1 to ack/clear
 767         *
 768         * bit: reg name      - description
 769         *   7: CIR_IRSTS_RDR - RX Data Ready
 770         *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
 771         *   5: CIR_IRSTS_PE  - Packet End
 772         *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
 773         *   3: CIR_IRSTS_TE  - TX FIFO Empty
 774         *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
 775         *   1: CIR_IRSTS_TFU - TX FIFO Underrun
 776         *   0: CIR_IRSTS_GH  - Min Length Detected
 777         */
 778        status = nvt_cir_reg_read(nvt, CIR_IRSTS);
 779        if (!status) {
 780                nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
 781                nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 782                return IRQ_RETVAL(IRQ_NONE);
 783        }
 784
 785        /* ack/clear all irq flags we've got */
 786        nvt_cir_reg_write(nvt, status, CIR_IRSTS);
 787        nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
 788
 789        /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
 790        iren = nvt_cir_reg_read(nvt, CIR_IREN);
 791        if (!iren) {
 792                nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
 793                return IRQ_RETVAL(IRQ_NONE);
 794        }
 795
 796        if (debug)
 797                nvt_cir_log_irqs(status, iren);
 798
 799        if (status & CIR_IRSTS_RTR) {
 800                /* FIXME: add code for study/learn mode */
 801                /* We only do rx if not tx'ing */
 802                if (nvt_cir_tx_inactive(nvt))
 803                        nvt_get_rx_ir_data(nvt);
 804        }
 805
 806        if (status & CIR_IRSTS_PE) {
 807                if (nvt_cir_tx_inactive(nvt))
 808                        nvt_get_rx_ir_data(nvt);
 809
 810                spin_lock_irqsave(&nvt->nvt_lock, flags);
 811
 812                cur_state = nvt->study_state;
 813
 814                spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 815
 816                if (cur_state == ST_STUDY_NONE)
 817                        nvt_clear_cir_fifo(nvt);
 818        }
 819
 820        if (status & CIR_IRSTS_TE)
 821                nvt_clear_tx_fifo(nvt);
 822
 823        if (status & CIR_IRSTS_TTR) {
 824                unsigned int pos, count;
 825                u8 tmp;
 826
 827                spin_lock_irqsave(&nvt->tx.lock, flags);
 828
 829                pos = nvt->tx.cur_buf_num;
 830                count = nvt->tx.buf_count;
 831
 832                /* Write data into the hardware tx fifo while pos < count */
 833                if (pos < count) {
 834                        nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
 835                        nvt->tx.cur_buf_num++;
 836                /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
 837                } else {
 838                        tmp = nvt_cir_reg_read(nvt, CIR_IREN);
 839                        nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
 840                }
 841
 842                spin_unlock_irqrestore(&nvt->tx.lock, flags);
 843
 844        }
 845
 846        if (status & CIR_IRSTS_TFU) {
 847                spin_lock_irqsave(&nvt->tx.lock, flags);
 848                if (nvt->tx.tx_state == ST_TX_REPLY) {
 849                        nvt->tx.tx_state = ST_TX_REQUEST;
 850                        wake_up(&nvt->tx.queue);
 851                }
 852                spin_unlock_irqrestore(&nvt->tx.lock, flags);
 853        }
 854
 855        nvt_dbg_verbose("%s done", __func__);
 856        return IRQ_RETVAL(IRQ_HANDLED);
 857}
 858
 859/* Interrupt service routine for CIR Wake */
 860static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
 861{
 862        u8 status, iren, val;
 863        struct nvt_dev *nvt = data;
 864        unsigned long flags;
 865
 866        nvt_dbg_wake("%s firing", __func__);
 867
 868        status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
 869        if (!status)
 870                return IRQ_RETVAL(IRQ_NONE);
 871
 872        if (status & CIR_WAKE_IRSTS_IR_PENDING)
 873                nvt_clear_cir_wake_fifo(nvt);
 874
 875        nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
 876        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
 877
 878        /* Interrupt may be shared with CIR, bail if Wake not enabled */
 879        iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
 880        if (!iren) {
 881                nvt_dbg_wake("%s exiting, wake not enabled", __func__);
 882                return IRQ_RETVAL(IRQ_HANDLED);
 883        }
 884
 885        if ((status & CIR_WAKE_IRSTS_PE) &&
 886            (nvt->wake_state == ST_WAKE_START)) {
 887                while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
 888                        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
 889                        nvt_dbg("setting wake up key: 0x%x", val);
 890                }
 891
 892                nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
 893                spin_lock_irqsave(&nvt->nvt_lock, flags);
 894                nvt->wake_state = ST_WAKE_FINISH;
 895                spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 896        }
 897
 898        nvt_dbg_wake("%s done", __func__);
 899        return IRQ_RETVAL(IRQ_HANDLED);
 900}
 901
 902static void nvt_enable_cir(struct nvt_dev *nvt)
 903{
 904        /* set function enable flags */
 905        nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
 906                          CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
 907                          CIR_IRCON);
 908
 909        nvt_efm_enable(nvt);
 910
 911        /* enable the CIR logical device */
 912        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 913        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 914
 915        nvt_efm_disable(nvt);
 916
 917        /* clear all pending interrupts */
 918        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 919
 920        /* enable interrupts */
 921        nvt_set_cir_iren(nvt);
 922}
 923
 924static void nvt_disable_cir(struct nvt_dev *nvt)
 925{
 926        /* disable CIR interrupts */
 927        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 928
 929        /* clear any and all pending interrupts */
 930        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 931
 932        /* clear all function enable flags */
 933        nvt_cir_reg_write(nvt, 0, CIR_IRCON);
 934
 935        /* clear hardware rx and tx fifos */
 936        nvt_clear_cir_fifo(nvt);
 937        nvt_clear_tx_fifo(nvt);
 938
 939        nvt_efm_enable(nvt);
 940
 941        /* disable the CIR logical device */
 942        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 943        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
 944
 945        nvt_efm_disable(nvt);
 946}
 947
 948static int nvt_open(struct rc_dev *dev)
 949{
 950        struct nvt_dev *nvt = dev->priv;
 951        unsigned long flags;
 952
 953        spin_lock_irqsave(&nvt->nvt_lock, flags);
 954        nvt_enable_cir(nvt);
 955        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 956
 957        return 0;
 958}
 959
 960static void nvt_close(struct rc_dev *dev)
 961{
 962        struct nvt_dev *nvt = dev->priv;
 963        unsigned long flags;
 964
 965        spin_lock_irqsave(&nvt->nvt_lock, flags);
 966        nvt_disable_cir(nvt);
 967        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 968}
 969
 970/* Allocate memory, probe hardware, and initialize everything */
 971static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 972{
 973        struct nvt_dev *nvt;
 974        struct rc_dev *rdev;
 975        int ret = -ENOMEM;
 976
 977        nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
 978        if (!nvt)
 979                return ret;
 980
 981        /* input device for IR remote (and tx) */
 982        rdev = rc_allocate_device();
 983        if (!rdev)
 984                goto exit_free_dev_rdev;
 985
 986        ret = -ENODEV;
 987        /* activate pnp device */
 988        if (pnp_activate_dev(pdev) < 0) {
 989                dev_err(&pdev->dev, "Could not activate PNP device!\n");
 990                goto exit_free_dev_rdev;
 991        }
 992
 993        /* validate pnp resources */
 994        if (!pnp_port_valid(pdev, 0) ||
 995            pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
 996                dev_err(&pdev->dev, "IR PNP Port not valid!\n");
 997                goto exit_free_dev_rdev;
 998        }
 999
1000        if (!pnp_irq_valid(pdev, 0)) {
1001                dev_err(&pdev->dev, "PNP IRQ not valid!\n");
1002                goto exit_free_dev_rdev;
1003        }
1004
1005        if (!pnp_port_valid(pdev, 1) ||
1006            pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
1007                dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
1008                goto exit_free_dev_rdev;
1009        }
1010
1011        nvt->cir_addr = pnp_port_start(pdev, 0);
1012        nvt->cir_irq  = pnp_irq(pdev, 0);
1013
1014        nvt->cir_wake_addr = pnp_port_start(pdev, 1);
1015        /* irq is always shared between cir and cir wake */
1016        nvt->cir_wake_irq  = nvt->cir_irq;
1017
1018        nvt->cr_efir = CR_EFIR;
1019        nvt->cr_efdr = CR_EFDR;
1020
1021        spin_lock_init(&nvt->nvt_lock);
1022        spin_lock_init(&nvt->tx.lock);
1023
1024        pnp_set_drvdata(pdev, nvt);
1025        nvt->pdev = pdev;
1026
1027        init_waitqueue_head(&nvt->tx.queue);
1028
1029        ret = nvt_hw_detect(nvt);
1030        if (ret)
1031                goto exit_free_dev_rdev;
1032
1033        /* Initialize CIR & CIR Wake Logical Devices */
1034        nvt_efm_enable(nvt);
1035        nvt_cir_ldev_init(nvt);
1036        nvt_cir_wake_ldev_init(nvt);
1037        nvt_efm_disable(nvt);
1038
1039        /* Initialize CIR & CIR Wake Config Registers */
1040        nvt_cir_regs_init(nvt);
1041        nvt_cir_wake_regs_init(nvt);
1042
1043        /* Set up the rc device */
1044        rdev->priv = nvt;
1045        rdev->driver_type = RC_DRIVER_IR_RAW;
1046        rdev->allowed_protocols = RC_BIT_ALL;
1047        rdev->open = nvt_open;
1048        rdev->close = nvt_close;
1049        rdev->tx_ir = nvt_tx_ir;
1050        rdev->s_tx_carrier = nvt_set_tx_carrier;
1051        rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
1052        rdev->input_phys = "nuvoton/cir0";
1053        rdev->input_id.bustype = BUS_HOST;
1054        rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
1055        rdev->input_id.product = nvt->chip_major;
1056        rdev->input_id.version = nvt->chip_minor;
1057        rdev->dev.parent = &pdev->dev;
1058        rdev->driver_name = NVT_DRIVER_NAME;
1059        rdev->map_name = RC_MAP_RC6_MCE;
1060        rdev->timeout = MS_TO_NS(100);
1061        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
1062        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
1063#if 0
1064        rdev->min_timeout = XYZ;
1065        rdev->max_timeout = XYZ;
1066        /* tx bits */
1067        rdev->tx_resolution = XYZ;
1068#endif
1069        nvt->rdev = rdev;
1070
1071        ret = rc_register_device(rdev);
1072        if (ret)
1073                goto exit_free_dev_rdev;
1074
1075        ret = -EBUSY;
1076        /* now claim resources */
1077        if (!request_region(nvt->cir_addr,
1078                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1079                goto exit_unregister_device;
1080
1081        if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
1082                        NVT_DRIVER_NAME, (void *)nvt))
1083                goto exit_release_cir_addr;
1084
1085        if (!request_region(nvt->cir_wake_addr,
1086                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1087                goto exit_free_irq;
1088
1089        if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
1090                        NVT_DRIVER_NAME, (void *)nvt))
1091                goto exit_release_cir_wake_addr;
1092
1093        device_init_wakeup(&pdev->dev, true);
1094
1095        nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
1096        if (debug) {
1097                cir_dump_regs(nvt);
1098                cir_wake_dump_regs(nvt);
1099        }
1100
1101        return 0;
1102
1103exit_release_cir_wake_addr:
1104        release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
1105exit_free_irq:
1106        free_irq(nvt->cir_irq, nvt);
1107exit_release_cir_addr:
1108        release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
1109exit_unregister_device:
1110        rc_unregister_device(rdev);
1111        rdev = NULL;
1112exit_free_dev_rdev:
1113        rc_free_device(rdev);
1114        kfree(nvt);
1115
1116        return ret;
1117}
1118
1119static void nvt_remove(struct pnp_dev *pdev)
1120{
1121        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1122        unsigned long flags;
1123
1124        spin_lock_irqsave(&nvt->nvt_lock, flags);
1125        /* disable CIR */
1126        nvt_cir_reg_write(nvt, 0, CIR_IREN);
1127        nvt_disable_cir(nvt);
1128        /* enable CIR Wake (for IR power-on) */
1129        nvt_enable_wake(nvt);
1130        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
1131
1132        /* free resources */
1133        free_irq(nvt->cir_irq, nvt);
1134        free_irq(nvt->cir_wake_irq, nvt);
1135        release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
1136        release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
1137
1138        rc_unregister_device(nvt->rdev);
1139
1140        kfree(nvt);
1141}
1142
1143static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
1144{
1145        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1146        unsigned long flags;
1147
1148        nvt_dbg("%s called", __func__);
1149
1150        /* zero out misc state tracking */
1151        spin_lock_irqsave(&nvt->nvt_lock, flags);
1152        nvt->study_state = ST_STUDY_NONE;
1153        nvt->wake_state = ST_WAKE_NONE;
1154        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
1155
1156        spin_lock_irqsave(&nvt->tx.lock, flags);
1157        nvt->tx.tx_state = ST_TX_NONE;
1158        spin_unlock_irqrestore(&nvt->tx.lock, flags);
1159
1160        /* disable all CIR interrupts */
1161        nvt_cir_reg_write(nvt, 0, CIR_IREN);
1162
1163        nvt_efm_enable(nvt);
1164
1165        /* disable cir logical dev */
1166        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
1167        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
1168
1169        nvt_efm_disable(nvt);
1170
1171        /* make sure wake is enabled */
1172        nvt_enable_wake(nvt);
1173
1174        return 0;
1175}
1176
1177static int nvt_resume(struct pnp_dev *pdev)
1178{
1179        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1180
1181        nvt_dbg("%s called", __func__);
1182
1183        /* re-enable CIR interrupts */
1184        nvt_set_cir_iren(nvt);
1185
1186        /* Enable CIR logical device */
1187        nvt_efm_enable(nvt);
1188        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
1189        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
1190
1191        nvt_efm_disable(nvt);
1192
1193        nvt_cir_regs_init(nvt);
1194        nvt_cir_wake_regs_init(nvt);
1195
1196        return 0;
1197}
1198
1199static void nvt_shutdown(struct pnp_dev *pdev)
1200{
1201        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1202        nvt_enable_wake(nvt);
1203}
1204
1205static const struct pnp_device_id nvt_ids[] = {
1206        { "WEC0530", 0 },   /* CIR */
1207        { "NTN0530", 0 },   /* CIR for new chip's pnp id */
1208        { "", 0 },
1209};
1210
1211static struct pnp_driver nvt_driver = {
1212        .name           = NVT_DRIVER_NAME,
1213        .id_table       = nvt_ids,
1214        .flags          = PNP_DRIVER_RES_DO_NOT_CHANGE,
1215        .probe          = nvt_probe,
1216        .remove         = nvt_remove,
1217        .suspend        = nvt_suspend,
1218        .resume         = nvt_resume,
1219        .shutdown       = nvt_shutdown,
1220};
1221
1222module_param(debug, int, S_IRUGO | S_IWUSR);
1223MODULE_PARM_DESC(debug, "Enable debugging output");
1224
1225MODULE_DEVICE_TABLE(pnp, nvt_ids);
1226MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
1227
1228MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
1229MODULE_LICENSE("GPL");
1230
1231module_pnp_driver(nvt_driver);
1232