// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
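/*
 * Sentinel published to userspace in a dynamic region's uio_mem->addr
 * while no DMA allocation is backing that region.
 */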
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
        struct uio_info *uioinfo;
        spinlock_t lock;
        unsigned long flags;
        struct platform_device *pdev;
        unsigned int dmem_region_start;
        unsigned int num_dmem_regions;
        void *dmem_region_vaddr[MAX_UIO_MAPS];
        struct mutex alloc_lock;
        unsigned int refcnt;
};

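/*
 * Dynamic regions are allocated on first open() and freed when the last
 * reference is dropped in release(); refcnt, guarded by alloc_lock,
 * tracks the number of concurrent openers.
 */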
static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
        struct uio_dmem_genirq_platdata *priv = info->priv;
        struct uio_mem *uiomem;
        int dmem_region = priv->dmem_region_start;

        uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

        mutex_lock(&priv->alloc_lock);
        while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
                void *addr;

                if (!uiomem->size)
                        break;

                addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
                                (dma_addr_t *)&uiomem->addr, GFP_KERNEL);
                if (!addr)
                        uiomem->addr = DMEM_MAP_ERROR;
                priv->dmem_region_vaddr[dmem_region++] = addr;
                ++uiomem;
        }
        priv->refcnt++;

        mutex_unlock(&priv->alloc_lock);
        /* Wait until the Runtime PM code has woken up the device */
        pm_runtime_get_sync(&priv->pdev->dev);
        return 0;
}

static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
        struct uio_dmem_genirq_platdata *priv = info->priv;
        struct uio_mem *uiomem;
        int dmem_region = priv->dmem_region_start;

        /* Tell the Runtime PM code that the device has become idle */
        pm_runtime_put_sync(&priv->pdev->dev);

        uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

        mutex_lock(&priv->alloc_lock);

        priv->refcnt--;
        while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
                if (!uiomem->size)
                        break;
                if (priv->dmem_region_vaddr[dmem_region]) {
                        dma_free_coherent(&priv->pdev->dev, uiomem->size,
                                        priv->dmem_region_vaddr[dmem_region],
                                        uiomem->addr);
                }
                uiomem->addr = DMEM_MAP_ERROR;
                ++dmem_region;
                ++uiomem;
        }

        mutex_unlock(&priv->alloc_lock);
        return 0;
}
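
/*
 * Illustrative userspace counterpart (not part of this driver): the UIO
 * core selects map N via the mmap offset N * page size, so the first
 * dynamic region of size sz (map index dmem_region_start) could be
 * mapped roughly as below; uio_fd, sz and dmem_region_start are
 * placeholders.
 *
 *      mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, uio_fd,
 *           dmem_region_start * getpagesize());
 */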

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
        struct uio_dmem_genirq_platdata *priv = dev_info->priv;

        /* Just disable the interrupt in the interrupt controller, and
         * remember the state so we can allow user space to enable it later.
         */

        if (!test_and_set_bit(0, &priv->flags))
                disable_irq_nosync(irq);

        return IRQ_HANDLED;
}
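
/*
 * Illustrative userspace counterpart (not part of this driver): a blocking
 * read() on an open /dev/uioX descriptor returns the interrupt event count
 * once this handler has fired; uio_fd and nirqs are placeholders.
 *
 *      s32 nirqs;
 *      read(uio_fd, &nirqs, sizeof(nirqs));    // blocks until an interrupt
 */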

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
        struct uio_dmem_genirq_platdata *priv = dev_info->priv;
        unsigned long flags;

        /* Allow user space to enable and disable the interrupt
         * in the interrupt controller, but keep track of the
         * state to prevent per-irq depth damage.
         *
         * Serialize this operation to support multiple tasks.
         *
         * disable_irq() can sleep, so the nosync variant is used here
         * and the lock is dropped on every path out of this function.
         */

        spin_lock_irqsave(&priv->lock, flags);
        if (irq_on) {
                if (test_and_clear_bit(0, &priv->flags))
                        enable_irq(dev_info->irq);
        } else {
                if (!test_and_set_bit(0, &priv->flags))
                        disable_irq_nosync(dev_info->irq);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}
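
/*
 * Illustrative userspace counterpart (not part of this driver): the UIO
 * core routes a 4-byte write on the device node to the irqcontrol hook
 * above; uio_fd is a placeholder descriptor for an open /dev/uioX.
 *
 *      s32 irq_on = 1;         // 1 re-enables the irq, 0 disables it
 *      write(uio_fd, &irq_on, sizeof(irq_on));
 */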

static void uio_dmem_genirq_pm_disable(void *data)
{
        struct device *dev = data;

        pm_runtime_disable(dev);
}

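/*
 * Illustrative only: platform code might supply this driver's platform
 * data roughly as sketched below. The names my_dmem_sizes, my_pdata and
 * "my-uio-device" are hypothetical placeholders.
 *
 *      static unsigned int my_dmem_sizes[] = { SZ_1M, SZ_4M };
 *
 *      static struct uio_dmem_genirq_pdata my_pdata = {
 *              .uioinfo = {
 *                      .name = "my-uio-device",
 *                      .version = "0",
 *              },
 *              .dynamic_region_sizes = my_dmem_sizes,
 *              .num_dynamic_regions = ARRAY_SIZE(my_dmem_sizes),
 *      };
 *
 *      platform_device_register_data(NULL, DRIVER_NAME, -1,
 *                                    &my_pdata, sizeof(my_pdata));
 */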
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
        struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
        struct uio_info *uioinfo = &pdata->uioinfo;
        struct uio_dmem_genirq_platdata *priv;
        struct uio_mem *uiomem;
        int ret = -EINVAL;
        int i;

        if (pdev->dev.of_node) {
                /* alloc uioinfo for one device */
                uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
                if (!uioinfo) {
                        dev_err(&pdev->dev, "unable to kmalloc\n");
                        return -ENOMEM;
                }
                uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
                                               pdev->dev.of_node);
                uioinfo->version = "devicetree";
        }

        if (!uioinfo || !uioinfo->name || !uioinfo->version) {
                dev_err(&pdev->dev, "missing platform_data\n");
                return -EINVAL;
        }

        if (uioinfo->handler || uioinfo->irqcontrol ||
            uioinfo->irq_flags & IRQF_SHARED) {
                dev_err(&pdev->dev, "interrupt configuration error\n");
                return -EINVAL;
        }

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "unable to kmalloc\n");
                return -ENOMEM;
        }

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(&pdev->dev, "DMA enable failed\n");
                return ret;
        }

        priv->uioinfo = uioinfo;
        spin_lock_init(&priv->lock);
        priv->flags = 0; /* interrupt is enabled to begin with */
        priv->pdev = pdev;
        mutex_init(&priv->alloc_lock);

        if (!uioinfo->irq) {
                /* Multiple IRQs are not supported */
                ret = platform_get_irq(pdev, 0);
                if (ret == -ENXIO && pdev->dev.of_node)
                        ret = UIO_IRQ_NONE;
                else if (ret < 0)
                        return ret;
                uioinfo->irq = ret;
        }

        if (uioinfo->irq) {
                struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

                /*
                 * If this is a level interrupt, don't do lazy disable.
                 * Otherwise the irq will fire again immediately, since
                 * clearing the actual cause at device level is done in
                 * userspace. irqd_is_level_type() isn't used because it
                 * isn't valid until the irq is configured.
                 */
                if (irq_data &&
                    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
                        dev_dbg(&pdev->dev, "disable lazy unmask\n");
                        irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
                }
        }

        uiomem = &uioinfo->mem[0];

        for (i = 0; i < pdev->num_resources; ++i) {
                struct resource *r = &pdev->resource[i];

                if (r->flags != IORESOURCE_MEM)
                        continue;

                if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
                        dev_warn(&pdev->dev, "device has more than "
                                        __stringify(MAX_UIO_MAPS)
                                        " I/O memory resources.\n");
                        break;
                }

                uiomem->memtype = UIO_MEM_PHYS;
                uiomem->addr = r->start;
                uiomem->size = resource_size(r);
                ++uiomem;
        }

        priv->dmem_region_start = uiomem - &uioinfo->mem[0];
        priv->num_dmem_regions = pdata->num_dynamic_regions;

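        /*
         * Publish dynamic regions with the DMEM_MAP_ERROR sentinel as
         * their address; open() replaces it with the real DMA address
         * once allocation succeeds.
         */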
        for (i = 0; i < pdata->num_dynamic_regions; ++i) {
                if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
                        dev_warn(&pdev->dev, "device has more than "
                                        __stringify(MAX_UIO_MAPS)
                                        " dynamic and fixed memory regions.\n");
                        break;
                }
                uiomem->memtype = UIO_MEM_PHYS;
                uiomem->addr = DMEM_MAP_ERROR;
                uiomem->size = pdata->dynamic_region_sizes[i];
                ++uiomem;
        }

        while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
                uiomem->size = 0;
                ++uiomem;
        }

        /* This driver requires no hardware specific kernel code to handle
         * interrupts. Instead, the interrupt handler simply disables the
         * interrupt in the interrupt controller. User space is responsible
         * for performing hardware specific acknowledge and re-enabling of
         * the interrupt in the interrupt controller.
         *
         * Interrupt sharing is not supported.
         */

        uioinfo->handler = uio_dmem_genirq_handler;
        uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
        uioinfo->open = uio_dmem_genirq_open;
        uioinfo->release = uio_dmem_genirq_release;
        uioinfo->priv = priv;

        /* Enable Runtime PM for this device:
         * The device starts in suspended state to allow the hardware to be
         * turned off by default. The Runtime PM bus code should power on the
         * hardware and enable clocks at open().
         */
        pm_runtime_enable(&pdev->dev);

        ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev);
        if (ret)
                return ret;

        return devm_uio_register_device(&pdev->dev, priv->uioinfo);
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
        /* Runtime PM callback shared between ->runtime_suspend()
         * and ->runtime_resume(). Simply returns success.
         *
         * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
         * are used at open() and release() time. This allows the
         * Runtime PM code to turn off power to the device while the
         * device is unused, i.e. before open() and after release().
         *
         * This Runtime PM callback does not need to save or restore
         * any registers since user space is responsible for hardware
         * register reinitialization after open().
         */
        return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
        .runtime_suspend = uio_dmem_genirq_runtime_nop,
        .runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
        { /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
        .probe = uio_dmem_genirq_probe,
        .driver = {
                .name = DRIVER_NAME,
                .pm = &uio_dmem_genirq_dev_pm_ops,
                .of_match_table = of_match_ptr(uio_of_genirq_match),
        },
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);