/* linux/drivers/dma/sh/usb-dmac.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Renesas USB DMA Controller Driver
   4 *
   5 * Copyright (C) 2015 Renesas Electronics Corporation
   6 *
   7 * based on rcar-dmac.c
   8 * Copyright (C) 2014 Renesas Electronics Inc.
   9 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  10 */
  11
  12#include <linux/delay.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/dmaengine.h>
  15#include <linux/interrupt.h>
  16#include <linux/list.h>
  17#include <linux/module.h>
  18#include <linux/of.h>
  19#include <linux/of_dma.h>
  20#include <linux/of_platform.h>
  21#include <linux/platform_device.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/slab.h>
  24#include <linux/spinlock.h>
  25
  26#include "../dmaengine.h"
  27#include "../virt-dma.h"
  28
  29/*
  30 * struct usb_dmac_sg - Descriptor for a hardware transfer
  31 * @mem_addr: memory address
  32 * @size: transfer size in bytes
  33 */
  34struct usb_dmac_sg {
  35        dma_addr_t mem_addr;
  36        u32 size;
  37};
  38
  39/*
  40 * struct usb_dmac_desc - USB DMA Transfer Descriptor
  41 * @vd: base virtual channel DMA transaction descriptor
  42 * @direction: direction of the DMA transfer
  43 * @sg_allocated_len: length of allocated sg
  44 * @sg_len: length of sg
  45 * @sg_index: index of sg
  46 * @residue: residue after the DMAC completed a transfer
  47 * @node: node for desc_got and desc_freed
  48 * @done_cookie: cookie after the DMAC completed a transfer
  49 * @sg: information for the transfer
  50 */
  51struct usb_dmac_desc {
  52        struct virt_dma_desc vd;
  53        enum dma_transfer_direction direction;
  54        unsigned int sg_allocated_len;
  55        unsigned int sg_len;
  56        unsigned int sg_index;
  57        u32 residue;
  58        struct list_head node;
  59        dma_cookie_t done_cookie;
  60        struct usb_dmac_sg sg[];
  61};
  62
  63#define to_usb_dmac_desc(vd)    container_of(vd, struct usb_dmac_desc, vd)
  64
  65/*
  66 * struct usb_dmac_chan - USB DMA Controller Channel
  67 * @vc: base virtual DMA channel object
  68 * @iomem: channel I/O memory base
  69 * @index: index of this channel in the controller
  70 * @irq: irq number of this channel
  71 * @desc: the current descriptor
  72 * @descs_allocated: number of descriptors allocated
  73 * @desc_got: got descriptors
  74 * @desc_freed: freed descriptors after the DMAC completed a transfer
  75 */
  76struct usb_dmac_chan {
  77        struct virt_dma_chan vc;
  78        void __iomem *iomem;
  79        unsigned int index;
  80        int irq;
  81        struct usb_dmac_desc *desc;
  82        int descs_allocated;
  83        struct list_head desc_got;
  84        struct list_head desc_freed;
  85};
  86
  87#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
  88
  89/*
  90 * struct usb_dmac - USB DMA Controller
  91 * @engine: base DMA engine object
  92 * @dev: the hardware device
  93 * @iomem: remapped I/O memory base
  94 * @n_channels: number of available channels
  95 * @channels: array of DMAC channels
  96 */
  97struct usb_dmac {
  98        struct dma_device engine;
  99        struct device *dev;
 100        void __iomem *iomem;
 101
 102        unsigned int n_channels;
 103        struct usb_dmac_chan *channels;
 104};
 105
 106#define to_usb_dmac(d)          container_of(d, struct usb_dmac, engine)
 107
 108/* -----------------------------------------------------------------------------
 109 * Registers
 110 */
 111
 112#define USB_DMAC_CHAN_OFFSET(i)         (0x20 + 0x20 * (i))
 113
 114#define USB_DMASWR                      0x0008
 115#define USB_DMASWR_SWR                  (1 << 0)
 116#define USB_DMAOR                       0x0060
 117#define USB_DMAOR_AE                    (1 << 1)
 118#define USB_DMAOR_DME                   (1 << 0)
 119
 120#define USB_DMASAR                      0x0000
 121#define USB_DMADAR                      0x0004
 122#define USB_DMATCR                      0x0008
 123#define USB_DMATCR_MASK                 0x00ffffff
 124#define USB_DMACHCR                     0x0014
 125#define USB_DMACHCR_FTE                 (1 << 24)
 126#define USB_DMACHCR_NULLE               (1 << 16)
 127#define USB_DMACHCR_NULL                (1 << 12)
 128#define USB_DMACHCR_TS_8B               ((0 << 7) | (0 << 6))
 129#define USB_DMACHCR_TS_16B              ((0 << 7) | (1 << 6))
 130#define USB_DMACHCR_TS_32B              ((1 << 7) | (0 << 6))
 131#define USB_DMACHCR_IE                  (1 << 5)
 132#define USB_DMACHCR_SP                  (1 << 2)
 133#define USB_DMACHCR_TE                  (1 << 1)
 134#define USB_DMACHCR_DE                  (1 << 0)
 135#define USB_DMATEND                     0x0018
 136
 137/* Hardcode the xfer_shift to 5 (32bytes) */
 138#define USB_DMAC_XFER_SHIFT     5
 139#define USB_DMAC_XFER_SIZE      (1 << USB_DMAC_XFER_SHIFT)
 140#define USB_DMAC_CHCR_TS        USB_DMACHCR_TS_32B
 141#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
 142
 143/* for descriptors */
 144#define USB_DMAC_INITIAL_NR_DESC        16
 145#define USB_DMAC_INITIAL_NR_SG          8
 146
 147/* -----------------------------------------------------------------------------
 148 * Device access
 149 */
 150
 151static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
 152{
 153        writel(data, dmac->iomem + reg);
 154}
 155
 156static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
 157{
 158        return readl(dmac->iomem + reg);
 159}
 160
 161static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
 162{
 163        return readl(chan->iomem + reg);
 164}
 165
 166static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
 167{
 168        writel(data, chan->iomem + reg);
 169}
 170
 171/* -----------------------------------------------------------------------------
 172 * Initialization and configuration
 173 */
 174
/*
 * A channel is busy when transfer-enable (DE) is set but transfer-end (TE)
 * has not been raised yet.
 */
static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
{
        u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);

        return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
}
 181
 182static u32 usb_dmac_calc_tend(u32 size)
 183{
 184        /*
 185         * Please refer to the Figure "Example of Final Transaction Valid
 186         * Data Transfer Enable (EDTEN) Setting" in the data sheet.
 187         */
 188        return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
 189                                                USB_DMAC_XFER_SIZE));
 190}
 191
/* Called with vc.lock held. */
static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
                                   unsigned int index)
{
        struct usb_dmac_desc *desc = chan->desc;
        struct usb_dmac_sg *sg = desc->sg + index;
        dma_addr_t src_addr = 0, dst_addr = 0;

        WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));

        /* Only the memory side of the transfer is programmable here. */
        if (desc->direction == DMA_DEV_TO_MEM)
                dst_addr = sg->mem_addr;
        else
                src_addr = sg->mem_addr;

        dev_dbg(chan->vc.chan.device->dev,
                "chan%u: queue sg %p: %u@%pad -> %pad\n",
                chan->index, sg, sg->size, &src_addr, &dst_addr);

        /* Addresses: only the low 32 bits are written to the registers. */
        usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
        usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
        /* USB_DMATCR counts in units of USB_DMAC_XFER_SIZE (32 bytes). */
        usb_dmac_chan_write(chan, USB_DMATCR,
                            DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
        usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));

        /* Start: 32-byte transfer size, NULL error + IRQ enable, DE set. */
        usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
                        USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
}
 220
 221/* This function is already held by vc.lock */
 222static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
 223{
 224        struct virt_dma_desc *vd;
 225
 226        vd = vchan_next_desc(&chan->vc);
 227        if (!vd) {
 228                chan->desc = NULL;
 229                return;
 230        }
 231
 232        /*
 233         * Remove this request from vc->desc_issued. Otherwise, this driver
 234         * will get the previous value from vchan_next_desc() after a transfer
 235         * was completed.
 236         */
 237        list_del(&vd->node);
 238
 239        chan->desc = to_usb_dmac_desc(vd);
 240        chan->desc->sg_index = 0;
 241        usb_dmac_chan_start_sg(chan, 0);
 242}
 243
/*
 * Enable the DMAC globally and verify the result: DME must be set and no
 * address error (AE) flagged.  Returns 0 on success, -EIO otherwise.
 */
static int usb_dmac_init(struct usb_dmac *dmac)
{
        u16 dmaor;

        /* Clear all channels and enable the DMAC globally. */
        usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);

        dmaor = usb_dmac_read(dmac, USB_DMAOR);
        if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
                dev_warn(dmac->dev, "DMAOR initialization failed.\n");
                return -EIO;
        }

        return 0;
}
 259
 260/* -----------------------------------------------------------------------------
 261 * Descriptors allocation and free
 262 */
 263static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
 264                               gfp_t gfp)
 265{
 266        struct usb_dmac_desc *desc;
 267        unsigned long flags;
 268
 269        desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
 270        if (!desc)
 271                return -ENOMEM;
 272
 273        desc->sg_allocated_len = sg_len;
 274        INIT_LIST_HEAD(&desc->node);
 275
 276        spin_lock_irqsave(&chan->vc.lock, flags);
 277        list_add_tail(&desc->node, &chan->desc_freed);
 278        spin_unlock_irqrestore(&chan->vc.lock, flags);
 279
 280        return 0;
 281}
 282
 283static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
 284{
 285        struct usb_dmac_desc *desc, *_desc;
 286        LIST_HEAD(list);
 287
 288        list_splice_init(&chan->desc_freed, &list);
 289        list_splice_init(&chan->desc_got, &list);
 290
 291        list_for_each_entry_safe(desc, _desc, &list, node) {
 292                list_del(&desc->node);
 293                kfree(desc);
 294        }
 295        chan->descs_allocated = 0;
 296}
 297
/*
 * Get a descriptor able to hold at least @sg_len entries: first try to
 * reuse one from desc_freed, otherwise allocate a new one.  The returned
 * descriptor is moved to desc_got.  Returns NULL on allocation failure.
 */
static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
                                               unsigned int sg_len, gfp_t gfp)
{
        struct usb_dmac_desc *desc = NULL;
        unsigned long flags;

        /* Get a freed descriptor */
        spin_lock_irqsave(&chan->vc.lock, flags);
        list_for_each_entry(desc, &chan->desc_freed, node) {
                if (sg_len <= desc->sg_allocated_len) {
                        list_move_tail(&desc->node, &chan->desc_got);
                        spin_unlock_irqrestore(&chan->vc.lock, flags);
                        return desc;
                }
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);

        /* Allocate a new descriptor */
        if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
                /* If allocated the desc, it was added to tail of the list */
                spin_lock_irqsave(&chan->vc.lock, flags);
                desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
                                       node);
                list_move_tail(&desc->node, &chan->desc_got);
                spin_unlock_irqrestore(&chan->vc.lock, flags);
                return desc;
        }

        return NULL;
}
 328
/* Return a descriptor to the freed list so it can be reused later. */
static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
                              struct usb_dmac_desc *desc)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        list_move_tail(&desc->node, &chan->desc_freed);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}
 338
 339/* -----------------------------------------------------------------------------
 340 * Stop and reset
 341 */
 342
 343static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
 344{
 345        struct dma_chan *chan = &uchan->vc.chan;
 346        struct usb_dmac *dmac = to_usb_dmac(chan->device);
 347        int i;
 348
 349        /* Don't issue soft reset if any one of channels is busy */
 350        for (i = 0; i < dmac->n_channels; ++i) {
 351                if (usb_dmac_chan_is_busy(uchan))
 352                        return;
 353        }
 354
 355        usb_dmac_write(dmac, USB_DMAOR, 0);
 356        usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
 357        udelay(100);
 358        usb_dmac_write(dmac, USB_DMASWR, 0);
 359        usb_dmac_write(dmac, USB_DMAOR, 1);
 360}
 361
/*
 * Stop the channel by clearing IE/TE/DE in CHCR, then attempt a controller
 * soft reset (which only proceeds when no channel is busy).
 */
static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
{
        u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);

        chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
        usb_dmac_chan_write(chan, USB_DMACHCR, chcr);

        usb_dmac_soft_reset(chan);
}
 371
/* Disable the DMAC globally (used from the shutdown handler). */
static void usb_dmac_stop(struct usb_dmac *dmac)
{
        usb_dmac_write(dmac, USB_DMAOR, 0);
}
 376
 377/* -----------------------------------------------------------------------------
 378 * DMA engine operations
 379 */
 380
 381static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
 382{
 383        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
 384        int ret;
 385
 386        while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
 387                ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
 388                                          GFP_KERNEL);
 389                if (ret < 0) {
 390                        usb_dmac_desc_free(uchan);
 391                        return ret;
 392                }
 393                uchan->descs_allocated++;
 394        }
 395
 396        return pm_runtime_get_sync(chan->device->dev);
 397}
 398
/* Halt the channel, release all descriptors and drop the PM reference. */
static void usb_dmac_free_chan_resources(struct dma_chan *chan)
{
        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
        unsigned long flags;

        /* Protect against ISR */
        spin_lock_irqsave(&uchan->vc.lock, flags);
        usb_dmac_chan_halt(uchan);
        spin_unlock_irqrestore(&uchan->vc.lock, flags);

        usb_dmac_desc_free(uchan);
        vchan_free_chan_resources(&uchan->vc);

        pm_runtime_put(chan->device->dev);
}
 414
 415static struct dma_async_tx_descriptor *
 416usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 417                       unsigned int sg_len, enum dma_transfer_direction dir,
 418                       unsigned long dma_flags, void *context)
 419{
 420        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
 421        struct usb_dmac_desc *desc;
 422        struct scatterlist *sg;
 423        int i;
 424
 425        if (!sg_len) {
 426                dev_warn(chan->device->dev,
 427                         "%s: bad parameter: len=%d\n", __func__, sg_len);
 428                return NULL;
 429        }
 430
 431        desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
 432        if (!desc)
 433                return NULL;
 434
 435        desc->direction = dir;
 436        desc->sg_len = sg_len;
 437        for_each_sg(sgl, sg, sg_len, i) {
 438                desc->sg[i].mem_addr = sg_dma_address(sg);
 439                desc->sg[i].size = sg_dma_len(sg);
 440        }
 441
 442        return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
 443}
 444
 445static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
 446{
 447        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
 448        struct usb_dmac_desc *desc, *_desc;
 449        unsigned long flags;
 450        LIST_HEAD(head);
 451        LIST_HEAD(list);
 452
 453        spin_lock_irqsave(&uchan->vc.lock, flags);
 454        usb_dmac_chan_halt(uchan);
 455        vchan_get_all_descriptors(&uchan->vc, &head);
 456        if (uchan->desc)
 457                uchan->desc = NULL;
 458        list_splice_init(&uchan->desc_got, &list);
 459        list_for_each_entry_safe(desc, _desc, &list, node)
 460                list_move_tail(&desc->node, &uchan->desc_freed);
 461        spin_unlock_irqrestore(&uchan->vc.lock, flags);
 462        vchan_dma_desc_free_list(&uchan->vc, &head);
 463
 464        return 0;
 465}
 466
/*
 * Residue of the usb_dmac_sg at @sg_index: the chunk size minus the bytes
 * already moved, derived from how far the memory-side address register has
 * advanced past the programmed start address.
 *
 * NOTE(review): only the low 32 bits of mem_addr are used, matching the
 * 32-bit address registers written in usb_dmac_chan_start_sg() — assumes
 * a single chunk never crosses a 4 GiB boundary; confirm on 64-bit DMA.
 */
static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
                                                 struct usb_dmac_desc *desc,
                                                 int sg_index)
{
        struct usb_dmac_sg *sg = desc->sg + sg_index;
        u32 mem_addr = sg->mem_addr & 0xffffffff;
        unsigned int residue = sg->size;

        /*
         * We cannot use USB_DMATCR to calculate residue because USB_DMATCR
         * has unsuited value to calculate.
         */
        if (desc->direction == DMA_DEV_TO_MEM)
                residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
        else
                residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;

        return residue;
}
 486
 487static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
 488                                                 dma_cookie_t cookie)
 489{
 490        struct usb_dmac_desc *desc;
 491        u32 residue = 0;
 492
 493        list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
 494                if (desc->done_cookie == cookie) {
 495                        residue = desc->residue;
 496                        break;
 497                }
 498        }
 499
 500        return residue;
 501}
 502
/*
 * Residue of an in-progress transaction: bytes remaining in the current
 * usb_dmac_sg plus the full size of every sg entry not yet started.  If
 * the cookie is not the active descriptor, it is looked up among the
 * issued-but-not-started descriptors (then nothing has transferred yet).
 */
static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
                                     dma_cookie_t cookie)
{
        u32 residue = 0;
        struct virt_dma_desc *vd;
        struct usb_dmac_desc *desc = chan->desc;
        int i;

        if (!desc) {
                vd = vchan_find_desc(&chan->vc, cookie);
                if (!vd)
                        return 0;
                desc = to_usb_dmac_desc(vd);
        }

        /* Compute the size of all usb_dmac_sg still to be transferred */
        for (i = desc->sg_index + 1; i < desc->sg_len; i++)
                residue += desc->sg[i].size;

        /* Add the residue for the current sg */
        residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);

        return residue;
}
 527
/*
 * dmaengine device_tx_status callback: report completion status and the
 * residue for @cookie.  Completed cookies get the residue recorded at
 * completion time; in-flight ones are computed from the hardware.
 */
static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
        enum dma_status status;
        unsigned int residue = 0;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        /* a client driver will get residue after DMA_COMPLETE */
        if (!txstate)
                return status;

        spin_lock_irqsave(&uchan->vc.lock, flags);
        if (status == DMA_COMPLETE)
                residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
        else
                residue = usb_dmac_chan_get_residue(uchan, cookie);
        spin_unlock_irqrestore(&uchan->vc.lock, flags);

        dma_set_residue(txstate, residue);

        return status;
}
 553
/*
 * dmaengine device_issue_pending callback: move submitted descriptors to
 * the issued list and start one if the channel is currently idle.
 */
static void usb_dmac_issue_pending(struct dma_chan *chan)
{
        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&uchan->vc.lock, flags);
        if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
                usb_dmac_chan_start_desc(uchan);
        spin_unlock_irqrestore(&uchan->vc.lock, flags);
}
 564
/*
 * virt-dma desc_free callback: descriptors are recycled onto the
 * channel's freed list rather than kfree'd, so they can be reused.
 */
static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
        struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
        struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);

        usb_dmac_desc_put(chan, desc);
}
 572
 573/* -----------------------------------------------------------------------------
 574 * IRQ handling
 575 */
 576
/*
 * Handle end of one hardware transfer (called from the ISR with vc.lock
 * held): either start the next sg entry of the current descriptor, or
 * complete the descriptor, record its residue/cookie, and start the next
 * queued descriptor if any.
 */
static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
{
        struct usb_dmac_desc *desc = chan->desc;

        BUG_ON(!desc);

        if (++desc->sg_index < desc->sg_len) {
                usb_dmac_chan_start_sg(chan, desc->sg_index);
        } else {
                /* Save residue/cookie for later usb_dmac_tx_status() calls. */
                desc->residue = usb_dmac_get_current_residue(chan, desc,
                                                        desc->sg_index - 1);
                desc->done_cookie = desc->vd.tx.cookie;
                vchan_cookie_complete(&desc->vd);

                /* Restart the next transfer if this driver has a next desc */
                usb_dmac_chan_start_desc(chan);
        }
}
 595
/*
 * Per-channel interrupt handler.  Acknowledges transfer-end (TE/SP) and
 * NULL-pointer (NULL) conditions by clearing the corresponding CHCR bits,
 * then processes a completed transfer if DE was still set.
 */
static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
        struct usb_dmac_chan *chan = dev;
        irqreturn_t ret = IRQ_NONE;
        u32 mask = 0;
        u32 chcr;
        bool xfer_end = false;

        spin_lock(&chan->vc.lock);

        chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
        if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
                /* Clear DE together with the status bits to stop the channel. */
                mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
                if (chcr & USB_DMACHCR_DE)
                        xfer_end = true;
                ret |= IRQ_HANDLED;
        }
        if (chcr & USB_DMACHCR_NULL) {
                /* An interruption of TE will happen after we set FTE */
                mask |= USB_DMACHCR_NULL;
                chcr |= USB_DMACHCR_FTE;
                ret |= IRQ_HANDLED;
        }
        if (mask)
                usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);

        if (xfer_end)
                usb_dmac_isr_transfer_end(chan);

        spin_unlock(&chan->vc.lock);

        return ret;
}
 629
 630/* -----------------------------------------------------------------------------
 631 * OF xlate and channel filter
 632 */
 633
 634static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
 635{
 636        struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
 637        struct of_phandle_args *dma_spec = arg;
 638
 639        /* USB-DMAC should be used with fixed usb controller's FIFO */
 640        if (uchan->index != dma_spec->args[0])
 641                return false;
 642
 643        return true;
 644}
 645
 646static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 647                                          struct of_dma *ofdma)
 648{
 649        struct dma_chan *chan;
 650        dma_cap_mask_t mask;
 651
 652        if (dma_spec->args_count != 1)
 653                return NULL;
 654
 655        /* Only slave DMA channels can be allocated via DT */
 656        dma_cap_zero(mask);
 657        dma_cap_set(DMA_SLAVE, mask);
 658
 659        chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
 660                                     ofdma->of_node);
 661        if (!chan)
 662                return NULL;
 663
 664        return chan;
 665}
 666
 667/* -----------------------------------------------------------------------------
 668 * Power management
 669 */
 670
 671#ifdef CONFIG_PM
/*
 * Runtime suspend: halt every initialized channel.  A NULL iomem marks a
 * channel that usb_dmac_chan_probe() has not set up yet, so stop there.
 */
static int usb_dmac_runtime_suspend(struct device *dev)
{
        struct usb_dmac *dmac = dev_get_drvdata(dev);
        int i;

        for (i = 0; i < dmac->n_channels; ++i) {
                if (!dmac->channels[i].iomem)
                        break;
                usb_dmac_chan_halt(&dmac->channels[i]);
        }

        return 0;
}
 685
/* Runtime resume: re-enable the controller (DMAOR was lost while off). */
static int usb_dmac_runtime_resume(struct device *dev)
{
        struct usb_dmac *dmac = dev_get_drvdata(dev);

        return usb_dmac_init(dmac);
}
 692#endif /* CONFIG_PM */
 693
/* System sleep is handled by forcing runtime suspend/resume. */
static const struct dev_pm_ops usb_dmac_pm = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                      pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
                           NULL)
};
 700
 701/* -----------------------------------------------------------------------------
 702 * Probe and remove
 703 */
 704
 705static int usb_dmac_chan_probe(struct usb_dmac *dmac,
 706                               struct usb_dmac_chan *uchan,
 707                               unsigned int index)
 708{
 709        struct platform_device *pdev = to_platform_device(dmac->dev);
 710        char pdev_irqname[5];
 711        char *irqname;
 712        int ret;
 713
 714        uchan->index = index;
 715        uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
 716
 717        /* Request the channel interrupt. */
 718        sprintf(pdev_irqname, "ch%u", index);
 719        uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
 720        if (uchan->irq < 0)
 721                return -ENODEV;
 722
 723        irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
 724                                 dev_name(dmac->dev), index);
 725        if (!irqname)
 726                return -ENOMEM;
 727
 728        ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
 729                               IRQF_SHARED, irqname, uchan);
 730        if (ret) {
 731                dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
 732                        uchan->irq, ret);
 733                return ret;
 734        }
 735
 736        uchan->vc.desc_free = usb_dmac_virt_desc_free;
 737        vchan_init(&uchan->vc, &dmac->engine);
 738        INIT_LIST_HEAD(&uchan->desc_freed);
 739        INIT_LIST_HEAD(&uchan->desc_got);
 740
 741        return 0;
 742}
 743
 744static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
 745{
 746        struct device_node *np = dev->of_node;
 747        int ret;
 748
 749        ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
 750        if (ret < 0) {
 751                dev_err(dev, "unable to read dma-channels property\n");
 752                return ret;
 753        }
 754
 755        if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
 756                dev_err(dev, "invalid number of channels %u\n",
 757                        dmac->n_channels);
 758                return -EINVAL;
 759        }
 760
 761        return 0;
 762}
 763
/*
 * Probe: allocate controller/channel state, map registers, bring the
 * device up via runtime PM, initialize all channels, register as an OF
 * DMA provider and finally as a dmaengine device.
 */
static int usb_dmac_probe(struct platform_device *pdev)
{
        const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
        struct dma_device *engine;
        struct usb_dmac *dmac;
        struct resource *mem;
        unsigned int i;
        int ret;

        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;

        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);

        ret = usb_dmac_parse_of(&pdev->dev, dmac);
        if (ret < 0)
                return ret;

        dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
                                      sizeof(*dmac->channels), GFP_KERNEL);
        if (!dmac->channels)
                return -ENOMEM;

        /* Request resources. */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dmac->iomem))
                return PTR_ERR(dmac->iomem);

        /* Enable runtime PM and initialize the device. */
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
                goto error_pm;
        }

        ret = usb_dmac_init(dmac);

        if (ret) {
                dev_err(&pdev->dev, "failed to reset device\n");
                goto error;
        }

        /* Initialize the channels. */
        INIT_LIST_HEAD(&dmac->engine.channels);

        for (i = 0; i < dmac->n_channels; ++i) {
                ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
                if (ret < 0)
                        goto error;
        }

        /* Register the DMAC as a DMA provider for DT. */
        ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
                                         NULL);
        if (ret < 0)
                goto error;

        /*
         * Register the DMA engine device.
         *
         * Default transfer size of 32 bytes requires 32-byte alignment.
         */
        engine = &dmac->engine;
        dma_cap_set(DMA_SLAVE, engine->cap_mask);

        engine->dev = &pdev->dev;

        engine->src_addr_widths = widths;
        engine->dst_addr_widths = widths;
        engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
        engine->device_free_chan_resources = usb_dmac_free_chan_resources;
        engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
        engine->device_terminate_all = usb_dmac_chan_terminate_all;
        engine->device_tx_status = usb_dmac_tx_status;
        engine->device_issue_pending = usb_dmac_issue_pending;

        ret = dma_async_device_register(engine);
        if (ret < 0)
                goto error;

        pm_runtime_put(&pdev->dev);
        return 0;

error:
        /*
         * NOTE(review): reached also before of_dma_controller_register()
         * ran — confirm of_dma_controller_free() tolerates an unregistered
         * node on this kernel version.
         */
        of_dma_controller_free(pdev->dev.of_node);
        pm_runtime_put(&pdev->dev);
error_pm:
        pm_runtime_disable(&pdev->dev);
        return ret;
}
 861
/* Halt one channel and release its (devm-requested) IRQ early. */
static void usb_dmac_chan_remove(struct usb_dmac *dmac,
                                 struct usb_dmac_chan *uchan)
{
        usb_dmac_chan_halt(uchan);
        devm_free_irq(dmac->dev, uchan->irq, uchan);
}
 868
/*
 * Remove: quiesce all channels (and free their IRQs so no ISR runs during
 * teardown), then unregister the OF provider and the dmaengine device.
 */
static int usb_dmac_remove(struct platform_device *pdev)
{
        struct usb_dmac *dmac = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < dmac->n_channels; ++i)
                usb_dmac_chan_remove(dmac, &dmac->channels[i]);
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&dmac->engine);

        pm_runtime_disable(&pdev->dev);

        return 0;
}
 883
/* Shutdown: disable the DMAC globally so no transfer outlives the kernel. */
static void usb_dmac_shutdown(struct platform_device *pdev)
{
        struct usb_dmac *dmac = platform_get_drvdata(pdev);

        usb_dmac_stop(dmac);
}
 890
/* DT match table; the binding is the generic "renesas,usb-dmac". */
static const struct of_device_id usb_dmac_of_ids[] = {
        { .compatible = "renesas,usb-dmac", },
        { /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);

static struct platform_driver usb_dmac_driver = {
        .driver         = {
                .pm     = &usb_dmac_pm,
                .name   = "usb-dmac",
                .of_match_table = usb_dmac_of_ids,
        },
        .probe          = usb_dmac_probe,
        .remove         = usb_dmac_remove,
        .shutdown       = usb_dmac_shutdown,
};
 907
 908module_platform_driver(usb_dmac_driver);
 909
 910MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
 911MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
 912MODULE_LICENSE("GPL v2");
 913