linux/drivers/dma/stm32-dmamux.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */
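
/*
 * Consumers request a DMAMUX input line with a three-cell specifier, e.g.
 * (label and cell names below are illustrative only):
 *
 *      dmas = <&dmamux1 request config features>;
 *
 * Cell 0 selects the DMAMUX input request; the two remaining cells are
 * forwarded unchanged to the DMA master selected by this router.
 */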

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

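/*
 * Each DMAMUX output channel has its own channel configuration register
 * (CCR), spaced 4 bytes apart; writing a request ID into a channel's CCR
 * routes that peripheral request to the channel.
 */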
#define STM32_DMAMUX_CCR(x)             (0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS   32
#define STM32_DMAMUX_MAX_REQUESTS       255

struct stm32_dmamux {
        u32 master;
        u32 request;
        u32 chan_id;
};

struct stm32_dmamux_data {
        struct dma_router dmarouter;
        struct clk *clk;
        void __iomem *iomem;
        u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
        u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
        spinlock_t lock; /* Protects register access */
        unsigned long *dma_inuse; /* Used DMA channel */
        u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
                                                 * in suspend
                                                 */
        u32 dma_reqs[]; /* Number of DMA requests per DMA master.
                         *  [0] holds the number of DMA masters.
                         *  To be kept at the very end of this structure.
                         */
};

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
        return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
        writel_relaxed(val, iomem + reg);
}

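/*
 * dma_router .route_free callback: invoked when the client releases its
 * channel. Disable the routing by clearing the channel's CCR, mark the
 * DMAMUX output channel as free again and drop the runtime PM reference
 * taken when the route was allocated.
 */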
static void stm32_dmamux_free(struct device *dev, void *route_data)
{
        struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
        struct stm32_dmamux *mux = route_data;
        unsigned long flags;

        /* Clear dma request */
        spin_lock_irqsave(&dmamux->lock, flags);

        stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
        clear_bit(mux->chan_id, dmamux->dma_inuse);

        pm_runtime_put_sync(dev);

        spin_unlock_irqrestore(&dmamux->lock, flags);

        dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
                mux->request, mux->master, mux->chan_id);

        kfree(mux);
}

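/*
 * Route allocation callback registered through of_dma_router_register():
 * reserve a free DMAMUX output channel, work out which DMA master it is
 * wired to, program the channel's CCR with the requested input line and
 * rewrite the consumer's three-cell specifier into the four-cell specifier
 * expected by the selected DMA master.
 */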
static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
        struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
        struct stm32_dmamux *mux;
        u32 i, min, max;
        int ret;
        unsigned long flags;

        if (dma_spec->args_count != 3) {
                dev_err(&pdev->dev, "invalid number of dma mux args\n");
                return ERR_PTR(-EINVAL);
        }

        if (dma_spec->args[0] > dmamux->dmamux_requests) {
                dev_err(&pdev->dev, "invalid mux request number: %d\n",
                        dma_spec->args[0]);
                return ERR_PTR(-EINVAL);
        }

        mux = kzalloc(sizeof(*mux), GFP_KERNEL);
        if (!mux)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&dmamux->lock, flags);
        mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
                                           dmamux->dma_requests);

        if (mux->chan_id == dmamux->dma_requests) {
                spin_unlock_irqrestore(&dmamux->lock, flags);
                dev_err(&pdev->dev, "Run out of free DMA requests\n");
                ret = -ENOMEM;
                goto error_chan_id;
        }
        set_bit(mux->chan_id, dmamux->dma_inuse);
        spin_unlock_irqrestore(&dmamux->lock, flags);

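        /*
         * dma_reqs[1..dma_reqs[0]] holds the number of request lines of each
         * DMA master; walk it to find the master that owns chan_id, with
         * [min, max) being that master's range of DMAMUX output channels.
         */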
        /* Look for DMA Master */
        for (i = 1, min = 0, max = dmamux->dma_reqs[i];
             i <= dmamux->dma_reqs[0];
             min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
                if (mux->chan_id < max)
                        break;
        mux->master = i - 1;

        /* The of_node_put() will be done in of_dma_router_xlate function */
        dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
        if (!dma_spec->np) {
                dev_err(&pdev->dev, "can't get dma master\n");
                ret = -EINVAL;
                goto error;
        }

        /* Set dma request */
        spin_lock_irqsave(&dmamux->lock, flags);
        ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                spin_unlock_irqrestore(&dmamux->lock, flags);
                goto error;
        }
        spin_unlock_irqrestore(&dmamux->lock, flags);

        mux->request = dma_spec->args[0];

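        /*
         * Rewrite the three-cell DMAMUX specifier into the four-cell
         * specifier used by the DMA master: the channel id local to the
         * selected master, a request line of 0 (the routing is done by the
         * DMAMUX), then the two client cells passed through unchanged.
         */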
        /*  craft DMA spec */
        dma_spec->args[3] = dma_spec->args[2];
        dma_spec->args[2] = dma_spec->args[1];
        dma_spec->args[1] = 0;
        dma_spec->args[0] = mux->chan_id - min;
        dma_spec->args_count = 4;

        stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
                           mux->request);
        dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
                mux->request, mux->master, mux->chan_id);

        return mux;

error:
        clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
        kfree(mux);
        return ERR_PTR(ret);
}

static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
        { .compatible = "st,stm32-dma", },
        {},
};

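/*
 * Probe: count the "dma-masters" phandles, record how many request lines
 * each master provides (dma_reqs[]), size the in-use bitmap accordingly,
 * then reset every DMAMUX channel and register the router with the DMA
 * framework.
 */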
static int stm32_dmamux_probe(struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        const struct of_device_id *match;
        struct device_node *dma_node;
        struct stm32_dmamux_data *stm32_dmamux;
        struct resource *res;
        void __iomem *iomem;
        struct reset_control *rst;
        int i, count, ret;
        u32 dma_req;

        if (!node)
                return -ENODEV;

        count = device_property_count_u32(&pdev->dev, "dma-masters");
        if (count < 0) {
                dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
                return -ENODEV;
        }

        stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
                                    sizeof(u32) * (count + 1), GFP_KERNEL);
        if (!stm32_dmamux)
                return -ENOMEM;

        dma_req = 0;
        for (i = 1; i <= count; i++) {
                dma_node = of_parse_phandle(node, "dma-masters", i - 1);

                match = of_match_node(stm32_stm32dma_master_match, dma_node);
                if (!match) {
                        dev_err(&pdev->dev, "DMA master is not supported\n");
                        of_node_put(dma_node);
                        return -EINVAL;
                }

                if (of_property_read_u32(dma_node, "dma-requests",
                                         &stm32_dmamux->dma_reqs[i])) {
                        dev_info(&pdev->dev,
                                 "Missing MUX output information, using %u.\n",
                                 STM32_DMAMUX_MAX_DMA_REQUESTS);
                        stm32_dmamux->dma_reqs[i] =
                                STM32_DMAMUX_MAX_DMA_REQUESTS;
                }
                dma_req += stm32_dmamux->dma_reqs[i];
                of_node_put(dma_node);
        }

        if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
                dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
                return -ENODEV;
        }

        stm32_dmamux->dma_requests = dma_req;
        stm32_dmamux->dma_reqs[0] = count;
        stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
                                               BITS_TO_LONGS(dma_req),
                                               sizeof(unsigned long),
                                               GFP_KERNEL);
        if (!stm32_dmamux->dma_inuse)
                return -ENOMEM;

        if (device_property_read_u32(&pdev->dev, "dma-requests",
                                     &stm32_dmamux->dmamux_requests)) {
                stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
                dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
                         stm32_dmamux->dmamux_requests);
        }
        pm_runtime_get_noresume(&pdev->dev);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iomem = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(iomem))
                return PTR_ERR(iomem);

        spin_lock_init(&stm32_dmamux->lock);

        stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(stm32_dmamux->clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
                                     "Missing clock controller\n");

        ret = clk_prepare_enable(stm32_dmamux->clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
                return ret;
        }

        rst = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(rst)) {
                ret = PTR_ERR(rst);
                if (ret == -EPROBE_DEFER)
                        goto err_clk;
        } else {
                reset_control_assert(rst);
                udelay(2);
                reset_control_deassert(rst);
        }

        stm32_dmamux->iomem = iomem;
        stm32_dmamux->dmarouter.dev = &pdev->dev;
        stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

        platform_set_drvdata(pdev, stm32_dmamux);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        pm_runtime_get_noresume(&pdev->dev);

        /* Reset the dmamux */
        for (i = 0; i < stm32_dmamux->dma_requests; i++)
                stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

        pm_runtime_put(&pdev->dev);

        ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
                                     &stm32_dmamux->dmarouter);
        if (ret)
                goto pm_disable;

        return 0;

pm_disable:
        pm_runtime_disable(&pdev->dev);
err_clk:
        clk_disable_unprepare(stm32_dmamux->clk);

        return ret;
}

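/*
 * Runtime PM: the DMAMUX only needs its kernel clock while routes are in
 * use or while its registers are being accessed, so runtime suspend/resume
 * simply gate the clock.
 */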
#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

        clk_disable_unprepare(stm32_dmamux->clk);

        return 0;
}

static int stm32_dmamux_runtime_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int ret;

        ret = clk_prepare_enable(stm32_dmamux->clk);
        if (ret) {
                dev_err(&pdev->dev, "failed to prepare_enable clock\n");
                return ret;
        }

        return 0;
}
#endif

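/*
 * System sleep: the contents of every channel configuration register are
 * saved on suspend and written back on resume, with the device runtime
 * resumed around the register accesses.
 */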
#ifdef CONFIG_PM_SLEEP
static int stm32_dmamux_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int i, ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        for (i = 0; i < stm32_dmamux->dma_requests; i++)
                stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
                                                         STM32_DMAMUX_CCR(i));

        pm_runtime_put_sync(dev);

        pm_runtime_force_suspend(dev);

        return 0;
}

static int stm32_dmamux_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
        int i, ret;

        ret = pm_runtime_force_resume(dev);
        if (ret < 0)
                return ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        for (i = 0; i < stm32_dmamux->dma_requests; i++)
                stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
                                   stm32_dmamux->ccr[i]);

        pm_runtime_put_sync(dev);

        return 0;
}
#endif

static const struct dev_pm_ops stm32_dmamux_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
        SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
                           stm32_dmamux_runtime_resume, NULL)
};

static const struct of_device_id stm32_dmamux_match[] = {
        { .compatible = "st,stm32h7-dmamux" },
        {},
};

static struct platform_driver stm32_dmamux_driver = {
        .probe  = stm32_dmamux_probe,
        .driver = {
                .name = "stm32-dmamux",
                .of_match_table = stm32_dmamux_match,
                .pm = &stm32_dmamux_pm_ops,
        },
};

static int __init stm32_dmamux_init(void)
{
        return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");